Example #1
    def setUp(self):
        kvs.get_client().flushall()

        base_path = helpers.testdata_path("scenario")
        job = engine.prepare_job()
        self.job_profile, self.params, self.sections = (
            engine.import_job_profile(SCENARIO_SMOKE_TEST, job))
        self.job_ctxt = JobContext(self.params,
                                   job.id,
                                   sections=self.sections,
                                   base_path=base_path,
                                   oq_job_profile=self.job_profile,
                                   oq_job=job)

        self.job_ctxt.params[NUMBER_OF_CALC_KEY] = "1"

        self.job_ctxt.params['SERIALIZE_RESULTS_TO'] = 'xml'
        self.job_ctxt.serialize_results_to = ["xml"]

        # saving the default java implementation
        self.default = (
            scenario.ScenarioHazardCalculator.compute_ground_motion_field)

        self.grid = self.job_ctxt.region.grid

        self.job_ctxt.to_kvs()
Example #2
def get_running_job(job_id):
    """Helper function which is intended to be run by celery task functions.

    Given the id of an in-progress calculation
    (:class:`openquake.db.models.OqJob`), load all of the calculation
    data from the database and KVS and return a
    :class:`openquake.engine.JobContext` object.

    If the calculation is not currently running, a
    :exc:`JobCompletedError` is raised.

    :returns:
        :class:`openquake.engine.JobContext` object, representing an
        in-progress job. This object is created from cached data in the
        KVS as well as data stored in the relational database.
    :raises JobCompletedError:
        If :meth:`~openquake.engine.JobContext.is_job_completed` returns
        ``True`` for ``job_id``.
    """
    # pylint: disable=W0404
    from openquake.engine import JobContext

    if JobContext.is_job_completed(job_id):
        raise JobCompletedError(job_id)

    job_ctxt = JobContext.from_kvs(job_id)
    if job_ctxt and job_ctxt.params:
        level = job_ctxt.log_level
    else:
        level = 'warn'
    logs.init_logs_amqp_send(level=level, job_id=job_id)

    return job_ctxt
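
A minimal sketch of how a celery task might use this helper; the task name, its body, and the celery import style are illustrative assumptions, not code from the source (get_running_job and JobCompletedError are the ones shown above):

from celery.task import task  # celery 2.x-style import (assumed)

@task
def compute_block(job_id, block_id):
    # Bail out early if the job finished or failed in the meantime.
    try:
        job_ctxt = get_running_job(job_id)
    except JobCompletedError:
        return
    # ... perform the block computation using job_ctxt.params ...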
Example #3
def get_running_job(job_id):
    """Helper function which is intended to be run by celery task functions.

    Given the id of an in-progress calculation
    (:class:`openquake.db.models.OqJob`), load all of the calculation
    data from the database and KVS and return a
    :class:`openquake.engine.JobContext` object.

    If the calculation is not currently running, a
    :exc:`JobCompletedError` is raised.

    :returns:
        :class:`openquake.engine.JobContext` object, representing an
        in-progress job. This object is created from cached data in the
        KVS as well as data stored in the relational database.
    :raises JobCompletedError:
        If :meth:`~openquake.engine.JobContext.is_job_completed` returns
        ``True`` for ``job_id``.
    """
    # pylint: disable=W0404
    from openquake.engine import JobContext

    if JobContext.is_job_completed(job_id):
        raise JobCompletedError(job_id)

    job_ctxt = JobContext.from_kvs(job_id)
    if job_ctxt and job_ctxt.params:
        level = job_ctxt.log_level
    else:
        level = 'warn'
    logs.init_logs_amqp_send(level=level, job_id=job_id)

    return job_ctxt
Example #4
    def test_get_status_from_db(self):
        self.job = engine._job_from_file(helpers.get_data_path(CONFIG_FILE), "db")
        row = models.OqJob.objects.get(id=self.job.job_id)

        row.status = "failed"
        row.save()
        self.assertEqual("failed", JobContext.get_status_from_db(self.job.job_id))

        row.status = "running"
        row.save()
        self.assertEqual("running", JobContext.get_status_from_db(self.job.job_id))
Example #5
    def setUp(self):
        # Test 'event-based' job
        cfg_path = helpers.testdata_path("simplecase/config.gem")
        base_path = helpers.testdata_path("simplecase")

        oq_job = engine.prepare_job()
        oq_job_profile, params, sections = engine.import_job_profile(
            cfg_path, oq_job)

        self.eb_job = JobContext(
            params, oq_job.id, sections=sections, base_path=base_path,
            oq_job_profile=oq_job_profile, oq_job=oq_job)
Example #6
    def test_get_status_from_db(self):
        self.job = engine._job_from_file(
            helpers.get_data_path(CONFIG_FILE), 'db')
        row = models.OqJob.objects.get(id=self.job.job_id)

        row.status = "failed"
        row.save()
        self.assertEqual(
            "failed", JobContext.get_status_from_db(self.job.job_id))

        row.status = "running"
        row.save()
        self.assertEqual(
            "running", JobContext.get_status_from_db(self.job.job_id))
Example #7
    def setUp(self):
        client = kvs.get_client()

        # Delete managed job id info so we can predict the job key
        # which will be allocated for us
        client.delete(kvs.tokens.CURRENT_JOBS)

        self.generated_files = []

        job = engine.prepare_job()
        jp, params, sections = import_job_profile(helpers.get_data_path(CONFIG_FILE), job)
        self.job_ctxt = JobContext(params, job.id, sections=sections, oq_job_profile=jp, oq_job=job)

        job = engine.prepare_job()
        jp, params, sections = import_job_profile(helpers.get_data_path(CONFIG_WITH_INCLUDES), job)
        self.job_ctxt_with_includes = JobContext(params, job.id, sections=sections, oq_job_profile=jp, oq_job=job)
Example #8
    def setUp(self):
        inputs = [("fragility", ""), ("exposure", "")]
        self.job = self.setup_classic_job(inputs=inputs)

        kvs.mark_job_as_current(self.job.id)
        kvs.cache_gc(self.job.id)

        self.site = Site(1.0, 1.0)
        block = Block(self.job.id, BLOCK_ID, [self.site])
        block.to_kvs()

        # this region contains a single site, namely the site with
        # longitude == 1.0 and latitude == 1.0
        params = {"REGION_VERTEX": "1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0",
                "REGION_GRID_SPACING": "0.5", "BASE_PATH": ".",
                "OUTPUT_DIR": "."}

        self.job_ctxt = JobContext(params, self.job.id, oq_job=self.job)

        self.em = self._store_em()
        self._store_gmvs([0.40, 0.30, 0.45, 0.35, 0.40])

        self.calculator = ScenarioDamageRiskCalculator(self.job_ctxt)

        # just stubbing out some preprocessing stuff...
        ScenarioDamageRiskCalculator.store_exposure_assets = lambda self: None
        ScenarioDamageRiskCalculator.store_fragility_model = lambda self: None
        ScenarioDamageRiskCalculator.partition = lambda self: None
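
Note that assigning lambdas to the class, as above, stubs those methods for every test that runs afterwards as well. A self-restoring sketch, assuming the same mock library used in the other examples here (patch.object restores the original attribute on stop):

        patcher = patch.object(
            ScenarioDamageRiskCalculator, 'partition', lambda self: None)
        patcher.start()
        self.addCleanup(patcher.stop)  # undo the stub when this test ends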
Example #9
    def test_is_job_completed(self):
        job_id = engine._job_from_file(helpers.get_data_path(CONFIG_FILE), "db").job_id
        row = models.OqJob.objects.get(id=job_id)
        pairs = [("pending", False), ("running", False), ("succeeded", True), ("failed", True)]
        for status, is_completed in pairs:
            row.status = status
            row.save()
            self.assertEqual(JobContext.is_job_completed(job_id), is_completed)
Example #10
    def test_can_store_and_read_jobs_from_kvs(self):
        self.job_ctxt._log_level = 'debug'
        self.job_ctxt.params['debug'] = self.job_ctxt.log_level
        try:
            self.job_ctxt.to_kvs()

            job_from_kvs = JobContext.from_kvs(self.job_ctxt.job_id)
            self.assertEqual(self.job_ctxt.params, job_from_kvs.params)
        finally:
            helpers.cleanup_loggers()
Example #12
    def test_is_job_completed(self):
        job_id = engine._job_from_file(
            helpers.get_data_path(CONFIG_FILE), 'db').job_id
        row = models.OqJob.objects.get(id=job_id)
        pairs = [('pending', False), ('running', False),
                 ('succeeded', True), ('failed', True)]
        for status, is_completed in pairs:
            row.status = status
            row.save()
            self.assertEqual(
                JobContext.is_job_completed(job_id), is_completed)
Example #13
    def setUp(self):
        client = kvs.get_client()

        # Delete managed job id info so we can predict the job key
        # which will be allocated for us
        client.delete(kvs.tokens.CURRENT_JOBS)

        self.generated_files = []

        job = engine.prepare_job()
        jp, params, sections = import_job_profile(helpers.get_data_path(
            CONFIG_FILE), job)
        self.job_ctxt = JobContext(
            params, job.id, sections=sections, oq_job_profile=jp, oq_job=job)

        job = engine.prepare_job()
        jp, params, sections = import_job_profile(helpers.get_data_path(
            CONFIG_WITH_INCLUDES), job)
        self.job_ctxt_with_includes = JobContext(
            params, job.id, sections=sections, oq_job_profile=jp, oq_job=job)
Example #14
    def setUp(self):
        # Test 'event-based' job
        cfg_path = helpers.testdata_path("simplecase/config.gem")
        base_path = helpers.testdata_path("simplecase")

        oq_job = engine.prepare_job()
        oq_job_profile, params, sections = engine.import_job_profile(cfg_path, oq_job)

        self.eb_job = JobContext(
            params, oq_job.id, sections=sections, base_path=base_path, oq_job_profile=oq_job_profile, oq_job=oq_job
        )
Example #15
    def setUp(self):
        kvs.get_client().flushall()

        base_path = helpers.testdata_path("scenario")
        job = engine.prepare_job()
        self.job_profile, self.params, self.sections = (
            engine.import_job_profile(SCENARIO_SMOKE_TEST, job))
        self.job_ctxt = JobContext(
            self.params, job.id, sections=self.sections,
            base_path=base_path, oq_job_profile=self.job_profile,
            oq_job=job)

        self.job_ctxt.params[NUMBER_OF_CALC_KEY] = "1"

        self.job_ctxt.params['SERIALIZE_RESULTS_TO'] = 'xml'

        # saving the default java implementation
        self.default = (
            scenario.ScenarioHazardCalculator.compute_ground_motion_field)

        self.grid = self.job_ctxt.region.grid

        self.job_ctxt.to_kvs()
Example #16
class JobStatsTestCase(unittest.TestCase):
    '''
    Tests related to capturing job stats.
    '''

    def setUp(self):
        # Test 'event-based' job
        cfg_path = helpers.testdata_path("simplecase/config.gem")
        base_path = helpers.testdata_path("simplecase")

        oq_job = engine.prepare_job()
        oq_job_profile, params, sections = engine.import_job_profile(
            cfg_path, oq_job)

        self.eb_job = JobContext(
            params, oq_job.id, sections=sections, base_path=base_path,
            oq_job_profile=oq_job_profile, oq_job=oq_job)

    def test_record_initial_stats(self):
        '''Verify that
        :py:meth:`openquake.engine.JobContext._record_initial_stats`
        reports initial calculation stats.

        As we add fields to the uiapi.job_stats table, this test will need to
        be updated to check for this new information.
        '''
        self.eb_job._record_initial_stats()

        actual_stats = models.JobStats.objects.get(oq_job=self.eb_job.job_id)

        self.assertTrue(actual_stats.start_time is not None)
        self.assertEqual(91, actual_stats.num_sites)
        self.assertEqual(1, actual_stats.realizations)

    def test_job_launch_calls_record_initial_stats(self):
        '''When a job is launched, make sure that
        :py:meth:`openquake.engine.JobContext._record_initial_stats`
        is called.
        '''
        # Mock out pieces of the test job so it doesn't actually run.
        eb_haz_calc = ('openquake.calculators.hazard.event_based.core'
                       '.EventBasedHazardCalculator')
        eb_risk_calc = ('openquake.calculators.risk.event_based.core'
                       '.EventBasedRiskCalculator')
        methods = ('initialize', 'pre_execute', 'execute', 'post_execute')

        haz_patchers = [patch('%s.%s' % (eb_haz_calc, m)) for m in methods]
        risk_patchers = [patch('%s.%s' % (eb_risk_calc, m)) for m in methods]

        for p in haz_patchers:
            p.start()
        for p in risk_patchers:
            p.start()

        try:
            record = 'openquake.engine.JobContext._record_initial_stats'

            with patch(record) as record_mock:
                engine._launch_job(
                    self.eb_job, ['general', 'HAZARD', 'RISK'])

                self.assertEqual(1, record_mock.call_count)
        finally:
            for p in haz_patchers:
                p.stop()
            for p in risk_patchers:
                p.stop()
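
The patcher start/stop bookkeeping above can be compressed with mock's patch.multiple, which patches several attributes of one target and restores them when the block exits. A sketch under that assumption (DEFAULT is mock's sentinel meaning "replace with a MagicMock"):

        from mock import patch, DEFAULT

        with patch.multiple(eb_haz_calc, initialize=DEFAULT,
                            pre_execute=DEFAULT, execute=DEFAULT,
                            post_execute=DEFAULT):
            pass  # launch the job and make the assertions here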
Example #17
class JobTestCase(unittest.TestCase):

    def setUp(self):
        client = kvs.get_client()

        # Delete managed job id info so we can predict the job key
        # which will be allocated for us
        client.delete(kvs.tokens.CURRENT_JOBS)

        self.generated_files = []

        job = engine.prepare_job()
        jp, params, sections = import_job_profile(helpers.get_data_path(
            CONFIG_FILE), job)
        self.job_ctxt = JobContext(
            params, job.id, sections=sections, oq_job_profile=jp, oq_job=job)

        job = engine.prepare_job()
        jp, params, sections = import_job_profile(helpers.get_data_path(
            CONFIG_WITH_INCLUDES), job)
        self.job_ctxt_with_includes = JobContext(
            params, job.id, sections=sections, oq_job_profile=jp, oq_job=job)

    def tearDown(self):
        for cfg in self.generated_files:
            try:
                os.remove(cfg)
            except OSError:
                pass

        kvs.cache_gc('::JOB::1::')
        kvs.cache_gc('::JOB::2::')

    def test_job_has_the_correct_sections(self):
        self.assertEqual(["RISK", "HAZARD", "general"], self.job_ctxt.sections)
        self.assertEqual(self.job_ctxt.sections,
                         self.job_ctxt_with_includes.sections)

    def test_job_with_only_hazard_config_only_has_hazard_section(self):
        job_with_only_hazard = \
            helpers.job_from_file(helpers.get_data_path(HAZARD_ONLY))
        self.assertEqual(["HAZARD"], job_with_only_hazard.sections)

    def test_configuration_is_the_same_no_matter_which_way_its_provided(self):

        sha_from_file_key = lambda params, key: params[key].split('!')[1]

        # A unique job key is prepended to these file hashes
        # to enable garbage collection.
        # Thus, we have to do a little voodoo to make this test work.
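        # For illustration (format assumed from the split('!') above): a
        # stored value looks like "<unique job key>!<file sha1>", e.g.
        # "::JOB::1::!<sha1 hex digest>", so index 1 is the hash.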
        src_model = 'SOURCE_MODEL_LOGIC_TREE_FILE'
        gmpe = 'GMPE_LOGIC_TREE_FILE'

        self.job_ctxt.to_kvs()
        self.job_ctxt_with_includes.to_kvs()

        job1_src_model_sha = sha_from_file_key(self.job_ctxt.params, src_model)
        job2_src_model_sha = sha_from_file_key(
            self.job_ctxt_with_includes.params, src_model)

        self.assertEqual(job1_src_model_sha, job2_src_model_sha)

        del self.job_ctxt.params[src_model]
        del self.job_ctxt_with_includes.params[src_model]

        job1_gmpe_sha = sha_from_file_key(self.job_ctxt.params, gmpe)
        job2_gmpe_sha = sha_from_file_key(self.job_ctxt_with_includes.params,
                                          gmpe)
        self.assertEqual(job1_gmpe_sha, job2_gmpe_sha)

        del self.job_ctxt.params[gmpe]
        del self.job_ctxt_with_includes.params[gmpe]

        self.assertEqual(self.job_ctxt.params,
                         self.job_ctxt_with_includes.params)

    def test_can_store_and_read_jobs_from_kvs(self):
        self.job_ctxt._log_level = 'debug'
        self.job_ctxt.params['debug'] = self.job_ctxt.log_level
        try:
            self.job_ctxt.to_kvs()

            job_from_kvs = JobContext.from_kvs(self.job_ctxt.job_id)
            self.assertEqual(self.job_ctxt.params, job_from_kvs.params)
        finally:
            helpers.cleanup_loggers()
Example #18
class JobTestCase(unittest.TestCase):

    def setUp(self):
        client = kvs.get_client()

        # Delete managed job id info so we can predict the job key
        # which will be allocated for us
        client.delete(kvs.tokens.CURRENT_JOBS)

        self.generated_files = []

        job = engine.prepare_job()
        jp, params, sections = import_job_profile(helpers.get_data_path(
            CONFIG_FILE), job)
        self.job_ctxt = JobContext(
            params, job.id, sections=sections, oq_job_profile=jp, oq_job=job)

        job = engine.prepare_job()
        jp, params, sections = import_job_profile(helpers.get_data_path(
            CONFIG_WITH_INCLUDES), job)
        self.job_ctxt_with_includes = JobContext(
            params, job.id, sections=sections, oq_job_profile=jp, oq_job=job)

    def tearDown(self):
        for cfg in self.generated_files:
            try:
                os.remove(cfg)
            except OSError:
                pass

        kvs.cache_gc('::JOB::1::')
        kvs.cache_gc('::JOB::2::')

    def test_job_has_the_correct_sections(self):
        self.assertEqual(["RISK", "HAZARD", "general"], self.job_ctxt.sections)
        self.assertEqual(self.job_ctxt.sections,
                         self.job_ctxt_with_includes.sections)

    def test_job_with_only_hazard_config_only_has_hazard_section(self):
        job_with_only_hazard = \
            helpers.job_from_file(helpers.get_data_path(HAZARD_ONLY))
        self.assertEqual(["HAZARD"], job_with_only_hazard.sections)

    def test_configuration_is_the_same_no_matter_which_way_its_provided(self):

        sha_from_file_key = lambda params, key: params[key].split('!')[1]

        # A unique job key is prepended to these file hashes
        # to enable garbage collection.
        # Thus, we have to do a little voodoo to make this test work.
        src_model = 'SOURCE_MODEL_LOGIC_TREE_FILE'
        gmpe = 'GMPE_LOGIC_TREE_FILE'

        self.job_ctxt.to_kvs()
        self.job_ctxt_with_includes.to_kvs()

        job1_src_model_sha = sha_from_file_key(self.job_ctxt.params, src_model)
        job2_src_model_sha = sha_from_file_key(
            self.job_ctxt_with_includes.params, src_model)

        self.assertEqual(job1_src_model_sha, job2_src_model_sha)

        del self.job_ctxt.params[src_model]
        del self.job_ctxt_with_includes.params[src_model]

        job1_gmpe_sha = sha_from_file_key(self.job_ctxt.params, gmpe)
        job2_gmpe_sha = sha_from_file_key(self.job_ctxt_with_includes.params,
                                          gmpe)
        self.assertEqual(job1_gmpe_sha, job2_gmpe_sha)

        del self.job_ctxt.params[gmpe]
        del self.job_ctxt_with_includes.params[gmpe]

        self.assertEqual(self.job_ctxt.params,
                         self.job_ctxt_with_includes.params)

    def test_can_store_and_read_jobs_from_kvs(self):
        self.job_ctxt._log_level = 'debug'
        self.job_ctxt.params['debug'] = self.job_ctxt.log_level
        try:
            self.job_ctxt.to_kvs()

            job_from_kvs = JobContext.from_kvs(self.job_ctxt.job_id)
            self.assertEqual(self.job_ctxt.params, job_from_kvs.params)
        finally:
            helpers.cleanup_loggers()
Example #19
    def test_generate_hazard_curves_using_classical_psha(self):

        def verify_realization_haz_curves_stored_to_kvs(the_job, keys):
            """ This just tests to make sure there something in the KVS
            for each key in given list of keys. This does NOT test the
            actual results. """
            # TODO (LB): At some point we need to test the actual
            # results to verify they are correct

            realizations = int(
                the_job.params['NUMBER_OF_LOGIC_TREE_SAMPLES'])

            for realization in xrange(0, realizations):
                for site in the_job.sites_to_compute():
                    key = tokens.hazard_curve_poes_key(
                        the_job.job_id, realization, site)
                    self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_mean_haz_curves_stored_to_kvs(the_job, keys):
            """ Make sure that the keys and non-empty values for mean
            hazard curves have been written to KVS."""

            if the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == 'true':

                LOG.debug("verifying KVS entries for mean hazard curves")
                for site in the_job.sites_to_compute():
                    key = tokens.mean_hazard_curve_key(the_job.job_id, site)
                    self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_mean_haz_maps_stored_to_kvs(the_job, calculator, keys):
            """ Make sure that the keys and non-empty values for mean
            hazard maps have been written to KVS."""

            if (the_job.params[hazard_general.POES_PARAM_NAME] != '' and
                the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == \
                'true'):

                LOG.debug("verifying KVS entries for mean hazard maps")

                for poe in calculator.poes_hazard_maps:
                    for site in the_job.sites_to_compute():
                        key = tokens.mean_hazard_map_key(
                            the_job.job_id, site, poe)
                        self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_quantile_haz_curves_stored_to_kvs(the_job, calculator,
                                                     keys):
            """ Make sure that the keys and non-empty values for quantile
            hazard curves have been written to KVS."""

            quantiles = calculator.quantile_levels

            LOG.debug("verifying KVS entries for quantile hazard curves, "\
                "%s quantile values" % len(quantiles))

            for quantile in quantiles:
                for site in the_job.sites_to_compute():
                    key = tokens.quantile_hazard_curve_key(
                        the_job.job_id, site, quantile)
                    self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_quantile_haz_maps_stored_to_kvs(the_job, calculator, keys):
            """ Make sure that the keys and non-empty values for quantile
            hazard maps have been written to KVS."""

            quantiles = calculator.quantile_levels

            if (the_job.params[hazard_general.POES_PARAM_NAME] != '' and
                len(quantiles) > 0):

                poes = calculator.poes_hazard_maps

                LOG.debug("verifying KVS entries for quantile hazard maps, "\
                    "%s quantile values, %s PoEs" % (
                    len(quantiles), len(poes)))

                for quantile in quantiles:
                    for poe in poes:
                        for site in the_job.sites_to_compute():
                            key = tokens.quantile_hazard_map_key(
                                the_job.job_id, site, poe, quantile)
                            self.assertTrue(
                                key in keys, "Missing key %s" % key)

        def verify_realization_haz_curves_stored_to_nrml(the_job, calculator):
            """Tests that a NRML file has been written for each realization,
            and that this file validates against the NRML schema.
            Does NOT test if results in NRML file are correct.
            """
            realizations = int(
                the_job.params['NUMBER_OF_LOGIC_TREE_SAMPLES'])
            for realization in xrange(0, realizations):

                nrml_path = os.path.join(
                    "demos/classical_psha_simple/computed_output",
                    calculator.hazard_curve_filename(realization))

                LOG.debug("validating NRML file %s" % nrml_path)

                self.assertTrue(xml.validates_against_xml_schema(
                    nrml_path, NRML_SCHEMA_PATH),
                    "NRML instance file %s does not validate against schema" \
                    % nrml_path)

        def verify_mean_haz_curves_stored_to_nrml(the_job, calculator):
            """Tests that a mean hazard curve NRML file has been written,
            and that this file validates against the NRML schema.
            Does NOT test if results in NRML file are correct.
            """

            if the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == 'true':
                nrml_path = os.path.join(
                    "demos/classical_psha_simple/computed_output",
                    calculator.mean_hazard_curve_filename())

                LOG.debug("validating NRML file %s" % nrml_path)

                self.assertTrue(xml.validates_against_xml_schema(
                    nrml_path, NRML_SCHEMA_PATH),
                    "NRML instance file %s does not validate against schema" \
                    % nrml_path)

        def verify_mean_haz_maps_stored_to_nrml(the_job):
            """Tests that a mean hazard map NRML file has been written,
            and that this file validates against the NRML schema.
            Does NOT test if results in NRML file are correct.
            """
            if (the_job.params[hazard_general.POES_PARAM_NAME] != '' and
                the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == \
                'true'):

                for poe in calculator.poes_hazard_maps:
                    nrml_path = os.path.join(
                        "demos/classical_psha_simple/computed_output",
                        calculator.mean_hazard_map_filename(poe))

                    LOG.debug("validating NRML file for mean hazard map %s" \
                        % nrml_path)

                    self.assertTrue(xml.validates_against_xml_schema(
                        nrml_path, NRML_SCHEMA_PATH),
                        "NRML instance file %s does not validate against "\
                        "schema" % nrml_path)

        def verify_quantile_haz_curves_stored_to_nrml(the_job, calculator):
            """Tests that quantile hazard curve NRML files have been written,
            and that these files validate against the NRML schema.
            Does NOT test if results in NRML files are correct.
            """

            for quantile in calculator.quantile_levels:

                nrml_path = os.path.join(
                    "demos/classical_psha_simple/computed_output",
                    calculator.quantile_hazard_curve_filename(quantile))

                LOG.debug("validating NRML file for quantile hazard curve: "\
                    "%s" % nrml_path)

                self.assertTrue(xml.validates_against_xml_schema(
                    nrml_path, NRML_SCHEMA_PATH),
                    "NRML instance file %s does not validate against schema" \
                    % nrml_path)

        def verify_quantile_haz_maps_stored_to_nrml(the_job, calculator):
            """Tests that quantile hazard map NRML files have been written,
            and that these files validate against the NRML schema.
            Does NOT test if results in NRML files are correct.
            """

            quantiles = calculator.quantile_levels

            if (the_job.params[hazard_general.POES_PARAM_NAME] != '' and
                len(quantiles) > 0):

                for poe in calculator.poes_hazard_maps:
                    for quantile in quantiles:
                        nrml_path = os.path.join(
                            "demos/classical_psha_simple/computed_output",
                            calculator.quantile_hazard_map_filename(quantile,
                                                                   poe))

                        LOG.debug("validating NRML file for quantile hazard "\
                            "map: %s" % nrml_path)

                        self.assertTrue(xml.validates_against_xml_schema(
                            nrml_path, NRML_SCHEMA_PATH),
                            "NRML instance file %s does not validate against "\
                            "schema" % nrml_path)

        base_path = helpers.testdata_path("classical_psha_simple")
        path = helpers.testdata_path("classical_psha_simple/config.gem")
        job = engine.prepare_job()
        job_profile, params, sections = engine.import_job_profile(path, job)

        the_job = JobContext(
            params, job.id, sections=sections, base_path=base_path,
            serialize_results_to=['db', 'xml'], oq_job_profile=job_profile,
            oq_job=job)
        the_job.to_kvs()

        calc_mode = job_profile.calc_mode
        calculator = CALCULATORS[calc_mode](the_job)

        used_keys = []
        calculator.execute(used_keys)

        verify_realization_haz_curves_stored_to_kvs(the_job, used_keys)
        verify_realization_haz_curves_stored_to_nrml(the_job, calculator)

        # hazard curves: check results of mean and quantile computation
        verify_mean_haz_curves_stored_to_kvs(the_job, used_keys)
        verify_quantile_haz_curves_stored_to_kvs(the_job, calculator,
                                                 used_keys)

        verify_mean_haz_curves_stored_to_nrml(the_job, calculator)
        verify_quantile_haz_curves_stored_to_nrml(the_job, calculator)

        # hazard maps: check results of mean and quantile computation
        verify_mean_haz_maps_stored_to_kvs(the_job, calculator, used_keys)
        verify_quantile_haz_maps_stored_to_kvs(the_job, calculator, used_keys)

        verify_mean_haz_maps_stored_to_nrml(the_job)
        verify_quantile_haz_maps_stored_to_nrml(the_job, calculator)
Example #20
class ScenarioHazardCalculatorTestCase(unittest.TestCase):
    """
    Tests for the Scenario Hazard engine.
    """
    @classmethod
    def setUpClass(cls):
        cls.kvs_client = kvs.get_client()

    def setUp(self):
        kvs.get_client().flushall()

        base_path = helpers.testdata_path("scenario")
        job = engine.prepare_job()
        self.job_profile, self.params, self.sections = (
            engine.import_job_profile(SCENARIO_SMOKE_TEST, job))
        self.job_ctxt = JobContext(self.params,
                                   job.id,
                                   sections=self.sections,
                                   base_path=base_path,
                                   oq_job_profile=self.job_profile,
                                   oq_job=job)

        self.job_ctxt.params[NUMBER_OF_CALC_KEY] = "1"

        self.job_ctxt.params['SERIALIZE_RESULTS_TO'] = 'xml'
        self.job_ctxt.serialize_results_to = ["xml"]

        # saving the default java implementation
        self.default = (
            scenario.ScenarioHazardCalculator.compute_ground_motion_field)

        self.grid = self.job_ctxt.region.grid

        self.job_ctxt.to_kvs()

    def tearDown(self):
        # restoring the default java implementation
        scenario.ScenarioHazardCalculator.compute_ground_motion_field = \
            self.default

    def test_multiple_computations_are_triggered(self):
        """The hazard subsystem is able to trigger multiple computations.

        Depending on the value specified by the user in the
        NUMBER_OF_GROUND_MOTION_FIELDS_CALCULATIONS key, the system
        calls the computation of the values for the entire region
        multiple times.
        """

        self.job_ctxt.params[NUMBER_OF_CALC_KEY] = "3"
        self.job_profile.gmf_calculation_number = 3
        self.job_profile.save()

        calculator = scenario.ScenarioHazardCalculator(self.job_ctxt)

        with patch('openquake.calculators.hazard.scenario.core'
                   '.ScenarioHazardCalculator'
                   '.compute_ground_motion_field') as compute_gmf_mock:
            # the return value needs to be a Java HashMap
            compute_gmf_mock.return_value = java.jclass('HashMap')()
            calculator.execute()

        self.assertEquals(3, compute_gmf_mock.call_count)

    def test__serialize_gmf_one_gmf_serialization_per_calculation(self):
        # A GMF is serialized for each calculation.
        self.job_ctxt.params[NUMBER_OF_CALC_KEY] = "3"
        self.job_ctxt.params["SAVE_GMFS"] = "true"
        self.job_profile.gmf_calculation_number = 3
        self.job_profile.save()

        calculator = scenario.ScenarioHazardCalculator(self.job_ctxt)

        with patch('openquake.calculators.hazard.scenario.core'
                   '.ScenarioHazardCalculator'
                   '.compute_ground_motion_field') as compute_gmf_mock:
            # the return value needs to be a Java HashMap
            compute_gmf_mock.return_value = java.jclass('HashMap')()
            with patch('openquake.calculators.hazard.scenario.core'
                       '.ScenarioHazardCalculator'
                       '._serialize_gmf') as serialize_mock:
                calculator.execute()

        self.assertEquals(3, serialize_mock.call_count)

    def test__serialize_gmf_no_serialization_if_gmf_output_not_set(self):
        # The GMFs will only be serialized if SAVE_GMFS == True
        calculator = scenario.ScenarioHazardCalculator(self.job_ctxt)
        self.assertEqual(False, calculator._serialize_gmf(None, "pga", 0))

    def test__serialize_gmf(self):
        # GMFs are serialized as expected.
        location1 = java.jclass("Location")(1.0, 2.0)
        location2 = java.jclass("Location")(1.1, 2.1)
        site1 = java.jclass("Site")(location1)
        site2 = java.jclass("Site")(location2)
        hashmap = java.jclass("HashMap")()
        hashmap.put(site1, 0.1)
        hashmap.put(site2, 0.2)

        self.job_ctxt.params[NUMBER_OF_CALC_KEY] = "2"
        self.job_ctxt.params["SAVE_GMFS"] = "true"
        self.job_ctxt.params["REGION_VERTEX"] = ("0.0, 0.0, 0.0, 3.0, "
                                                 "3.0, 3.0, 3.0, 0.0")
        self.job_profile.region = GEOSGeometry(
            shapes.polygon_ewkt_from_coords(
                '0.0, 0.0, 0.0, 3.0, 3.0, 3.0, 3.0, 0.0'))
        self.job_profile.gmf_calculation_number = 2
        self.job_profile.save()

        calculator = scenario.ScenarioHazardCalculator(self.job_ctxt)

        with patch('openquake.calculators.hazard.scenario.core'
                   '.ScenarioHazardCalculator'
                   '.compute_ground_motion_field') as compute_gmf_mock:
            # the return value needs to be a Java HashMap
            compute_gmf_mock.return_value = hashmap
            calculator.execute()

        patht = os.path.join(self.job_ctxt.base_path,
                             self.job_ctxt['OUTPUT_DIR'], "gmf-%s.xml")
        for cnum in range(self.job_profile.gmf_calculation_number):
            path = patht % cnum
            self.assertTrue(os.path.isfile(path),
                            "GMF file not found (%s)" % path)

    def test__prepare_gmf_serialization_with_mmi(self):
        # In case of imt == mmi the GMF values are left unchanged
        location1 = java.jclass("Location")(1.0, 2.0)
        location2 = java.jclass("Location")(1.1, 2.1)

        site1 = java.jclass("Site")(location1)
        site2 = java.jclass("Site")(location2)

        hashmap = java.jclass("HashMap")()

        hashmap.put(site1, 0.1)
        hashmap.put(site2, 0.2)

        expected = {
            shapes.Site(2.0, 1.0): {
                "groundMotion": 0.1
            },
            shapes.Site(2.1, 1.1): {
                "groundMotion": 0.2
            }
        }
        actual = scenario._prepare_gmf_serialization(hashmap, "MMI")
        self.assertEqual(expected, actual)

    def test__prepare_gmf_serialization_with_imt_other_than_mmi(self):
        # For an imt other than mmi the GMF values are exponentiated.
        location1 = java.jclass("Location")(1.0, 2.0)
        location2 = java.jclass("Location")(1.1, 2.1)

        site1 = java.jclass("Site")(location1)
        site2 = java.jclass("Site")(location2)

        hashmap = java.jclass("HashMap")()

        hashmap.put(site1, 0.1)
        hashmap.put(site2, 0.2)

        expected = {
            shapes.Site(2.0, 1.0): {
                "groundMotion": math.exp(0.1)
            },
            shapes.Site(2.1, 1.1): {
                "groundMotion": math.exp(0.2)
            }
        }
        actual = scenario._prepare_gmf_serialization(hashmap, "PGA")
        self.assertEqual(expected, actual)

    def test_transforms_a_java_gmf_to_dict(self):
        location1 = java.jclass("Location")(1.0, 2.0)
        location2 = java.jclass("Location")(1.1, 2.1)
        location3 = java.jclass("Location")(1.2, 2.2)

        site1 = java.jclass("Site")(location1)
        site2 = java.jclass("Site")(location2)
        site3 = java.jclass("Site")(location3)

        hashmap = java.jclass("HashMap")()

        hashmap.put(site1, 0.1)
        hashmap.put(site2, 0.2)
        hashmap.put(site3, 0.3)

        gmf_as_dict = scenario.gmf_to_dict(hashmap, "MMI")

        for gmv in gmf_as_dict:
            self.assertTrue(gmv["mag"] in (0.1, 0.2, 0.3))
            self.assertTrue(gmv["site_lon"] in (2.0, 2.1, 2.2))
            self.assertTrue(gmv["site_lat"] in (1.0, 1.1, 1.2))

    def test_when_measure_type_is_not_mmi_exp_is_stored(self):
        location = java.jclass("Location")(1.0, 2.0)
        site = java.jclass("Site")(location)

        hashmap = java.jclass("HashMap")()
        hashmap.put(site, 0.1)

        for gmv in scenario.gmf_to_dict(hashmap, "PGA"):
            self.assertEqual(math.exp(0.1), gmv["mag"])

    def test_when_measure_type_is_mmi_we_store_as_is(self):
        location = java.jclass("Location")(1.0, 2.0)
        site = java.jclass("Site")(location)

        hashmap = java.jclass("HashMap")()
        hashmap.put(site, 0.1)

        for gmv in scenario.gmf_to_dict(hashmap, "MMI"):
            self.assertEqual(0.1, gmv["mag"])

    def test_loads_the_rupture_model(self):
        calculator = scenario.ScenarioHazardCalculator(self.job_ctxt)

        self.assertEqual("org.opensha.sha.earthquake.EqkRupture",
                         calculator.rupture_model.__class__.__name__)

    def test_the_same_calculator_is_used_between_multiple_invocations(self):
        calculator = scenario.ScenarioHazardCalculator(self.job_ctxt)

        gmf_calculator1 = calculator.gmf_calculator([shapes.Site(1.0, 1.0)])
        gmf_calculator2 = calculator.gmf_calculator([shapes.Site(1.0, 1.0)])

        self.assertTrue(gmf_calculator1 == gmf_calculator2)
Example #21
def create_job(params, **kwargs):
    job_id = kwargs.pop('job_id', 0)

    return JobContext(params, job_id, **kwargs)
class ScenarioHazardCalculatorTestCase(unittest.TestCase):
    """
    Tests for the Scenario Hazard engine.
    """

    @classmethod
    def setUpClass(cls):
        cls.kvs_client = kvs.get_client()

    def setUp(self):
        kvs.get_client().flushall()

        base_path = helpers.testdata_path("scenario")
        job = engine.prepare_job()
        self.job_profile, self.params, self.sections = (
            engine.import_job_profile(SCENARIO_SMOKE_TEST, job))
        self.job_ctxt = JobContext(
            self.params, job.id, sections=self.sections,
            base_path=base_path, oq_job_profile=self.job_profile,
            oq_job=job)

        self.job_ctxt.params[NUMBER_OF_CALC_KEY] = "1"

        self.job_ctxt.params['SERIALIZE_RESULTS_TO'] = 'xml'
        self.job_ctxt.serialize_results_to = ["xml"]

        # saving the default java implementation
        self.default = (
            scenario.ScenarioHazardCalculator.compute_ground_motion_field)

        self.grid = self.job_ctxt.region.grid

        self.job_ctxt.to_kvs()

    def tearDown(self):
        # restoring the default java implementation
        scenario.ScenarioHazardCalculator.compute_ground_motion_field = \
            self.default

    def test_multiple_computations_are_triggered(self):
        """The hazard subsystem is able to trigger multiple computations.

        Depending on the value specified by the user in the
        NUMBER_OF_GROUND_MOTION_FIELDS_CALCULATIONS key, the system
        calls the computation of the values for the entire region
        multiple times.
        """

        self.job_ctxt.params[NUMBER_OF_CALC_KEY] = "3"
        self.job_profile.gmf_calculation_number = 3
        self.job_profile.save()

        calculator = scenario.ScenarioHazardCalculator(self.job_ctxt)

        with patch('openquake.calculators.hazard.scenario.core'
                   '.ScenarioHazardCalculator'
                   '.compute_ground_motion_field') as compute_gmf_mock:
            # the return value needs to be a Java HashMap
            compute_gmf_mock.return_value = java.jclass('HashMap')()
            calculator.execute()

        self.assertEquals(3, compute_gmf_mock.call_count)

    def test__serialize_gmf_one_gmf_serialization_per_calculation(self):
        # A GMF is serialized for each calculation.
        self.job_ctxt.params[NUMBER_OF_CALC_KEY] = "3"
        self.job_ctxt.params["SAVE_GMFS"] = "true"
        self.job_profile.gmf_calculation_number = 3
        self.job_profile.save()

        calculator = scenario.ScenarioHazardCalculator(self.job_ctxt)

        with patch('openquake.calculators.hazard.scenario.core'
                   '.ScenarioHazardCalculator'
                   '.compute_ground_motion_field') as compute_gmf_mock:
            # the return value needs to be a Java HashMap
            compute_gmf_mock.return_value = java.jclass('HashMap')()
            with patch('openquake.calculators.hazard.scenario.core'
                       '.ScenarioHazardCalculator'
                       '._serialize_gmf') as serialize_mock:
                calculator.execute()

        self.assertEquals(3, serialize_mock.call_count)

    def test__serialize_gmf_no_serialization_if_gmf_output_not_set(self):
        # The GMFs will only be serialized if SAVE_GMFS == True
        calculator = scenario.ScenarioHazardCalculator(self.job_ctxt)
        self.assertEqual(False, calculator._serialize_gmf(None, "pga", 0))

    def test__serialize_gmf(self):
        # GMFs are serialized as expected.
        location1 = java.jclass("Location")(1.0, 2.0)
        location2 = java.jclass("Location")(1.1, 2.1)
        site1 = java.jclass("Site")(location1)
        site2 = java.jclass("Site")(location2)
        hashmap = java.jclass("HashMap")()
        hashmap.put(site1, 0.1)
        hashmap.put(site2, 0.2)

        self.job_ctxt.params[NUMBER_OF_CALC_KEY] = "2"
        self.job_ctxt.params["SAVE_GMFS"] = "true"
        self.job_ctxt.params["REGION_VERTEX"] = ("0.0, 0.0, 0.0, 3.0, "
                                                 "3.0, 3.0, 3.0, 0.0")
        self.job_profile.region = GEOSGeometry(shapes.polygon_ewkt_from_coords(
            '0.0, 0.0, 0.0, 3.0, 3.0, 3.0, 3.0, 0.0'))
        self.job_profile.gmf_calculation_number = 2
        self.job_profile.save()

        calculator = scenario.ScenarioHazardCalculator(self.job_ctxt)

        with patch('openquake.calculators.hazard.scenario.core'
                   '.ScenarioHazardCalculator'
                   '.compute_ground_motion_field') as compute_gmf_mock:
            # the return value needs to be a Java HashMap
            compute_gmf_mock.return_value = hashmap
            calculator.execute()

        patht = os.path.join(self.job_ctxt.base_path,
                             self.job_ctxt['OUTPUT_DIR'], "gmf-%s.xml")
        for cnum in range(self.job_profile.gmf_calculation_number):
            path = patht % cnum
            self.assertTrue(
                os.path.isfile(path), "GMF file not found (%s)" % path)

    def test__prepare_gmf_serialization_with_mmi(self):
        # In case of imt == mmi the GMF values are left unchanged
        location1 = java.jclass("Location")(1.0, 2.0)
        location2 = java.jclass("Location")(1.1, 2.1)

        site1 = java.jclass("Site")(location1)
        site2 = java.jclass("Site")(location2)

        hashmap = java.jclass("HashMap")()

        hashmap.put(site1, 0.1)
        hashmap.put(site2, 0.2)

        expected = {shapes.Site(2.0, 1.0): {"groundMotion": 0.1},
                    shapes.Site(2.1, 1.1): {"groundMotion": 0.2}}
        actual = scenario._prepare_gmf_serialization(hashmap, "MMI")
        self.assertEqual(expected, actual)

    def test__prepare_gmf_serialization_with_imt_other_than_mmi(self):
        # For an imt other than mmi the GMF values are exponentiated.
        location1 = java.jclass("Location")(1.0, 2.0)
        location2 = java.jclass("Location")(1.1, 2.1)

        site1 = java.jclass("Site")(location1)
        site2 = java.jclass("Site")(location2)

        hashmap = java.jclass("HashMap")()

        hashmap.put(site1, 0.1)
        hashmap.put(site2, 0.2)

        expected = {shapes.Site(2.0, 1.0): {"groundMotion": math.exp(0.1)},
                    shapes.Site(2.1, 1.1): {"groundMotion": math.exp(0.2)}}
        actual = scenario._prepare_gmf_serialization(hashmap, "PGA")
        self.assertEqual(expected, actual)

    def test_transforms_a_java_gmf_to_dict(self):
        location1 = java.jclass("Location")(1.0, 2.0)
        location2 = java.jclass("Location")(1.1, 2.1)
        location3 = java.jclass("Location")(1.2, 2.2)

        site1 = java.jclass("Site")(location1)
        site2 = java.jclass("Site")(location2)
        site3 = java.jclass("Site")(location3)

        hashmap = java.jclass("HashMap")()

        hashmap.put(site1, 0.1)
        hashmap.put(site2, 0.2)
        hashmap.put(site3, 0.3)

        gmf_as_dict = scenario.gmf_to_dict(hashmap, "MMI")

        for gmv in gmf_as_dict:
            self.assertTrue(gmv["mag"] in (0.1, 0.2, 0.3))
            self.assertTrue(gmv["site_lon"] in (2.0, 2.1, 2.2))
            self.assertTrue(gmv["site_lat"] in (1.0, 1.1, 1.2))

    def test_when_measure_type_is_not_mmi_exp_is_stored(self):
        location = java.jclass("Location")(1.0, 2.0)
        site = java.jclass("Site")(location)

        hashmap = java.jclass("HashMap")()
        hashmap.put(site, 0.1)

        for gmv in scenario.gmf_to_dict(hashmap, "PGA"):
            self.assertEqual(math.exp(0.1), gmv["mag"])

    def test_when_measure_type_is_mmi_we_store_as_is(self):
        location = java.jclass("Location")(1.0, 2.0)
        site = java.jclass("Site")(location)

        hashmap = java.jclass("HashMap")()
        hashmap.put(site, 0.1)

        for gmv in scenario.gmf_to_dict(hashmap, "MMI"):
            self.assertEqual(0.1, gmv["mag"])

    def test_loads_the_rupture_model(self):
        calculator = scenario.ScenarioHazardCalculator(self.job_ctxt)

        self.assertEqual("org.opensha.sha.earthquake.EqkRupture",
                         calculator.rupture_model.__class__.__name__)

    def test_the_same_calculator_is_used_between_multiple_invocations(self):
        calculator = scenario.ScenarioHazardCalculator(self.job_ctxt)

        gmf_calculator1 = calculator.gmf_calculator([shapes.Site(1.0, 1.0)])
        gmf_calculator2 = calculator.gmf_calculator([shapes.Site(1.0, 1.0)])

        self.assertTrue(gmf_calculator1 == gmf_calculator2)
Example #22
class ScenarioHazardCalculatorTestCase(unittest.TestCase):
    """
    Tests for the Scenario Hazard engine.
    """

    @classmethod
    def setUpClass(cls):
        cls.kvs_client = kvs.get_client()

    def setUp(self):
        kvs.get_client().flushall()

        base_path = helpers.testdata_path("scenario")
        job = engine.prepare_job()
        self.job_profile, self.params, self.sections = (
            engine.import_job_profile(SCENARIO_SMOKE_TEST, job))
        self.job_ctxt = JobContext(
            self.params, job.id, sections=self.sections,
            base_path=base_path, oq_job_profile=self.job_profile,
            oq_job=job)

        self.job_ctxt.params[NUMBER_OF_CALC_KEY] = "1"

        self.job_ctxt.params['SERIALIZE_RESULTS_TO'] = 'xml'

        # saving the default java implementation
        self.default = (
            scenario.ScenarioHazardCalculator.compute_ground_motion_field)

        self.grid = self.job_ctxt.region.grid

        self.job_ctxt.to_kvs()

    def tearDown(self):
        # restoring the default java implementation
        scenario.ScenarioHazardCalculator.compute_ground_motion_field = \
            self.default

    def test_multiple_computations_are_triggered(self):
        """The hazard subsystem is able to trigger multiple computations.

        Depending on the value specified by the user in the
        NUMBER_OF_GROUND_MOTION_FIELDS_CALCULATIONS key, the system
        calls the computation of the values for the entire region
        multiple times.
        """

        self.job_ctxt.params[NUMBER_OF_CALC_KEY] = "3"
        self.job_profile.gmf_calculation_number = 3
        self.job_profile.save()

        calculator = scenario.ScenarioHazardCalculator(self.job_ctxt)

        with patch('openquake.calculators.hazard.scenario.core'
                   '.ScenarioHazardCalculator'
                   '.compute_ground_motion_field') as compute_gmf_mock:
            # the return value needs to be a Java HashMap
            compute_gmf_mock.return_value = java.jclass('HashMap')()
            calculator.execute()

        self.assertEquals(3, compute_gmf_mock.call_count)

    def test_transforms_a_java_gmf_to_dict(self):
        location1 = java.jclass("Location")(1.0, 2.0)
        location2 = java.jclass("Location")(1.1, 2.1)
        location3 = java.jclass("Location")(1.2, 2.2)

        site1 = java.jclass("Site")(location1)
        site2 = java.jclass("Site")(location2)
        site3 = java.jclass("Site")(location3)

        hashmap = java.jclass("HashMap")()

        hashmap.put(site1, 0.1)
        hashmap.put(site2, 0.2)
        hashmap.put(site3, 0.3)

        gmf_as_dict = scenario.gmf_to_dict(hashmap, "MMI")

        for gmv in gmf_as_dict:
            self.assertTrue(gmv["mag"] in (0.1, 0.2, 0.3))
            self.assertTrue(gmv["site_lon"] in (2.0, 2.1, 2.2))
            self.assertTrue(gmv["site_lat"] in (1.0, 1.1, 1.2))

    def test_when_measure_type_is_not_mmi_exp_is_stored(self):
        location = java.jclass("Location")(1.0, 2.0)
        site = java.jclass("Site")(location)

        hashmap = java.jclass("HashMap")()
        hashmap.put(site, 0.1)

        for gmv in scenario.gmf_to_dict(hashmap, "PGA"):
            self.assertEqual(math.exp(0.1), gmv["mag"])

    def test_when_measure_type_is_mmi_we_store_as_is(self):
        location = java.jclass("Location")(1.0, 2.0)
        site = java.jclass("Site")(location)

        hashmap = java.jclass("HashMap")()
        hashmap.put(site, 0.1)

        for gmv in scenario.gmf_to_dict(hashmap, "MMI"):
            self.assertEqual(0.1, gmv["mag"])

    def test_loads_the_rupture_model(self):
        calculator = scenario.ScenarioHazardCalculator(self.job_ctxt)

        self.assertEqual("org.opensha.sha.earthquake.EqkRupture",
                         calculator.rupture_model.__class__.__name__)

    def test_the_same_calculator_is_used_between_multiple_invocations(self):
        calculator = scenario.ScenarioHazardCalculator(self.job_ctxt)

        gmf_calculator1 = calculator.gmf_calculator([shapes.Site(1.0, 1.0)])
        gmf_calculator2 = calculator.gmf_calculator([shapes.Site(1.0, 1.0)])

        self.assertTrue(gmf_calculator1 == gmf_calculator2)
Example #24
    def test_generate_hazard_curves_using_classical_psha(self):
        def verify_realization_haz_curves_stored_to_kvs(the_job, keys):
            """ This just tests to make sure there something in the KVS
            for each key in given list of keys. This does NOT test the
            actual results. """
            # TODO (LB): At some point we need to test the actual
            # results to verify they are correct

            realizations = int(the_job.params['NUMBER_OF_LOGIC_TREE_SAMPLES'])

            for realization in xrange(0, realizations):
                for site in the_job.sites_to_compute():
                    key = tokens.hazard_curve_poes_key(the_job.job_id,
                                                       realization, site)
                    self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_mean_haz_curves_stored_to_kvs(the_job, keys):
            """ Make sure that the keys and non-empty values for mean
            hazard curves have been written to KVS."""

            if the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == 'true':

                LOG.debug("verifying KVS entries for mean hazard curves")
                for site in the_job.sites_to_compute():
                    key = tokens.mean_hazard_curve_key(the_job.job_id, site)
                    self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_mean_haz_maps_stored_to_kvs(the_job, calculator, keys):
            """ Make sure that the keys and non-empty values for mean
            hazard maps have been written to KVS."""

            if (the_job.params[hazard_general.POES_PARAM_NAME] != '' and
                the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == \
                'true'):

                LOG.debug("verifying KVS entries for mean hazard maps")

                for poe in calculator.poes_hazard_maps:
                    for site in the_job.sites_to_compute():
                        key = tokens.mean_hazard_map_key(
                            the_job.job_id, site, poe)
                        self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_quantile_haz_curves_stored_to_kvs(the_job, calculator,
                                                     keys):
            """ Make sure that the keys and non-empty values for quantile
            hazard curves have been written to KVS."""

            quantiles = calculator.quantile_levels

            LOG.debug("verifying KVS entries for quantile hazard curves, "\
                "%s quantile values" % len(quantiles))

            for quantile in quantiles:
                for site in the_job.sites_to_compute():
                    key = tokens.quantile_hazard_curve_key(
                        the_job.job_id, site, quantile)
                    self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_quantile_haz_maps_stored_to_kvs(the_job, calculator, keys):
            """ Make sure that the keys and non-empty values for quantile
            hazard maps have been written to KVS."""

            quantiles = calculator.quantile_levels

            if (the_job.params[hazard_general.POES_PARAM_NAME] != ''
                    and len(quantiles) > 0):

                poes = calculator.poes_hazard_maps

                LOG.debug("verifying KVS entries for quantile hazard maps, "\
                    "%s quantile values, %s PoEs" % (
                    len(quantiles), len(poes)))

                for quantile in quantiles:
                    for poe in poes:
                        for site in the_job.sites_to_compute():
                            key = tokens.quantile_hazard_map_key(
                                the_job.job_id, site, poe, quantile)
                            self.assertTrue(key in keys,
                                            "Missing key %s" % key)

        def verify_realization_haz_curves_stored_to_nrml(the_job, calculator):
            """Tests that a NRML file has been written for each realization,
            and that this file validates against the NRML schema.
            Does NOT test if results in NRML file are correct.
            """
            realizations = int(the_job.params['NUMBER_OF_LOGIC_TREE_SAMPLES'])
            for realization in xrange(0, realizations):

                nrml_path = os.path.join(
                    "demos/classical_psha_simple/computed_output",
                    calculator.hazard_curve_filename(realization))

                LOG.debug("validating NRML file %s" % nrml_path)

                self.assertTrue(xml.validates_against_xml_schema(
                    nrml_path, NRML_SCHEMA_PATH),
                    "NRML instance file %s does not validate against schema" \
                    % nrml_path)

        def verify_mean_haz_curves_stored_to_nrml(the_job, calculator):
            """Tests that a mean hazard curve NRML file has been written,
            and that this file validates against the NRML schema.
            Does NOT test if results in NRML file are correct.
            """

            if the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == 'true':
                nrml_path = os.path.join(
                    "demos/classical_psha_simple/computed_output",
                    calculator.mean_hazard_curve_filename())

                LOG.debug("validating NRML file %s" % nrml_path)

                self.assertTrue(xml.validates_against_xml_schema(
                    nrml_path, NRML_SCHEMA_PATH),
                    "NRML instance file %s does not validate against schema" \
                    % nrml_path)

        def verify_mean_haz_maps_stored_to_nrml(the_job, calculator):
            """Tests that a mean hazard map NRML file has been written,
            and that this file validates against the NRML schema.
            Does NOT test if results in NRML file are correct.
            """
            if (the_job.params[hazard_general.POES_PARAM_NAME] != '' and
                the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == \
                'true'):

                for poe in calculator.poes_hazard_maps:
                    nrml_path = os.path.join(
                        "demos/classical_psha_simple/computed_output",
                        calculator.mean_hazard_map_filename(poe))

                    LOG.debug("validating NRML file for mean hazard map %s" \
                        % nrml_path)

                    self.assertTrue(xml.validates_against_xml_schema(
                        nrml_path, NRML_SCHEMA_PATH),
                        "NRML instance file %s does not validate against "\
                        "schema" % nrml_path)

        def verify_quantile_haz_curves_stored_to_nrml(the_job, calculator):
            """Tests that quantile hazard curve NRML files have been written,
            and that these files validate against the NRML schema.
            Does NOT test if results in NRML files are correct.
            """

            for quantile in calculator.quantile_levels:

                nrml_path = os.path.join(
                    "demos/classical_psha_simple/computed_output",
                    calculator.quantile_hazard_curve_filename(quantile))

                LOG.debug("validating NRML file for quantile hazard curve: "\
                    "%s" % nrml_path)

                self.assertTrue(xml.validates_against_xml_schema(
                    nrml_path, NRML_SCHEMA_PATH),
                    "NRML instance file %s does not validate against schema" \
                    % nrml_path)

        def verify_quantile_haz_maps_stored_to_nrml(the_job, calculator):
            """Tests that quantile hazard map NRML files have been written,
            and that these files validate against the NRML schema.
            Does NOT test if results in NRML files are correct.
            """

            quantiles = calculator.quantile_levels

            if (the_job.params[hazard_general.POES_PARAM_NAME] != ''
                    and len(quantiles) > 0):

                for poe in calculator.poes_hazard_maps:
                    for quantile in quantiles:
                        nrml_path = os.path.join(
                            "demos/classical_psha_simple/computed_output",
                            calculator.quantile_hazard_map_filename(
                                quantile, poe))

                        LOG.debug("validating NRML file for quantile hazard "\
                            "map: %s" % nrml_path)

                        self.assertTrue(xml.validates_against_xml_schema(
                            nrml_path, NRML_SCHEMA_PATH),
                            "NRML instance file %s does not validate against "\
                            "schema" % nrml_path)

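        # Run the classical PSHA demo end-to-end, then apply the
        # verification helpers defined above to its KVS and NRML output.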
        base_path = helpers.testdata_path("classical_psha_simple")
        path = helpers.testdata_path("classical_psha_simple/config.gem")
        job = engine.prepare_job()
        job_profile, params, sections = engine.import_job_profile(path, job)

        the_job = JobContext(params,
                             job.id,
                             sections=sections,
                             base_path=base_path,
                             serialize_results_to=['db', 'xml'],
                             oq_job_profile=job_profile,
                             oq_job=job)
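        # Serializing the context to the KVS lets worker processes rebuild
        # it later via JobContext.from_kvs(job_id).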
        the_job.to_kvs()

        calc_mode = job_profile.calc_mode
        calculator = CALCULATORS[calc_mode](the_job)

        used_keys = []
        calculator.execute(used_keys)
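        # execute() is expected to record each KVS key it writes in
        # used_keys; the helpers below check that list for completeness.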

        verify_realization_haz_curves_stored_to_kvs(the_job, used_keys)
        verify_realization_haz_curves_stored_to_nrml(the_job, calculator)

        # hazard curves: check results of mean and quantile computation
        verify_mean_haz_curves_stored_to_kvs(the_job, used_keys)
        verify_quantile_haz_curves_stored_to_kvs(the_job, calculator,
                                                 used_keys)

        verify_mean_haz_curves_stored_to_nrml(the_job, calculator)
        verify_quantile_haz_curves_stored_to_nrml(the_job, calculator)

        # hazard maps: check results of mean and quantile computation
        verify_mean_haz_maps_stored_to_kvs(the_job, calculator, used_keys)
        verify_quantile_haz_maps_stored_to_kvs(the_job, calculator, used_keys)

        verify_mean_haz_maps_stored_to_nrml(the_job, calculator)
        verify_quantile_haz_maps_stored_to_nrml(the_job, calculator)
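
All of the NRML checks above funnel through xml.validates_against_xml_schema.
As a rough sketch of what such a helper can look like (assuming lxml; the
actual openquake.xml implementation may differ):

    from lxml import etree

    def validates_against_xml_schema(instance_path, schema_path):
        """Return True if the XML document at `instance_path` validates
        against the XML Schema at `schema_path`."""
        schema = etree.XMLSchema(etree.parse(schema_path))
        return schema.validate(etree.parse(instance_path))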
Example #25
0
class JobStatsTestCase(unittest.TestCase):
    '''
    Tests related to capturing job stats.
    '''

    def setUp(self):
        # Test 'event-based' job
        cfg_path = helpers.testdata_path("simplecase/config.gem")
        base_path = helpers.testdata_path("simplecase")

        oq_job = engine.prepare_job()
        oq_job_profile, params, sections = engine.import_job_profile(
            cfg_path, oq_job)

        self.eb_job = JobContext(
            params, oq_job.id, sections=sections, base_path=base_path,
            oq_job_profile=oq_job_profile, oq_job=oq_job)

    def test_record_initial_stats(self):
        '''Verify that
        :py:meth:`openquake.engine.JobContext._record_initial_stats`
        reports initial calculation stats.

        As fields are added to the uiapi.job_stats table, this test will
        need to be updated to check the new information.
        '''
        self.eb_job._record_initial_stats()

        actual_stats = models.JobStats.objects.get(oq_job=self.eb_job.job_id)

        self.assertIsNotNone(actual_stats.start_time)
        self.assertEqual(91, actual_stats.num_sites)
        self.assertEqual(1, actual_stats.realizations)
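
    # Judging from the assertions above, _record_initial_stats plausibly
    # creates a JobStats row along these lines (a hypothetical sketch; the
    # real implementation lives in openquake.engine):
    #
    #     models.JobStats.objects.create(
    #         oq_job_id=self.job_id,
    #         start_time=datetime.utcnow(),
    #         num_sites=len(self.sites_to_compute()),
    #         realizations=int(self.params['NUMBER_OF_LOGIC_TREE_SAMPLES']))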

    def test_job_launch_calls_record_initial_stats(self):
        '''When a job is launched, make sure that
        :py:meth:`openquake.engine.JobContext._record_initial_stats`
        is called.
        '''
        # Mock out pieces of the test job so it doesn't actually run.
        eb_haz_calc = ('openquake.calculators.hazard.event_based.core'
                       '.EventBasedHazardCalculator')
        eb_risk_calc = ('openquake.calculators.risk.event_based.core'
                        '.EventBasedRiskCalculator')
        methods = ('initialize', 'pre_execute', 'execute', 'post_execute')

        haz_patchers = [patch('%s.%s' % (eb_haz_calc, m)) for m in methods]
        risk_patchers = [patch('%s.%s' % (eb_risk_calc, m)) for m in methods]

        for p in haz_patchers:
            p.start()
        for p in risk_patchers:
            p.start()

        try:
            record = 'openquake.engine.JobContext._record_initial_stats'

            with patch(record) as record_mock:
                engine._launch_job(
                    self.eb_job, ['general', 'HAZARD', 'RISK'])

                self.assertEqual(1, record_mock.call_count)
        finally:
            for p in haz_patchers:
                p.stop()
            for p in risk_patchers:
                p.stop()
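
The manual start()/stop() bookkeeping above leaks patches if an exception
is raised between starting the patchers and entering the try block. A
sketch of an equivalent using a context manager (hypothetical helper name;
same mock patch API):

    import contextlib

    from mock import patch

    @contextlib.contextmanager
    def patched_methods(target, methods):
        """Patch every named method on `target`, undoing all patches
        on exit."""
        patchers = [patch('%s.%s' % (target, m)) for m in methods]
        mocks = [p.start() for p in patchers]
        try:
            yield mocks
        finally:
            for p in patchers:
                p.stop()

With such a helper, the two patcher lists and the try/finally collapse to
a pair of nested `with patched_methods(...)` blocks.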