Example #1
0
    def test_uhs(self):
        """Run the UHS demo end-to-end, then check the UHS results stored
        in the database against the expected values."""
        expected = self._load_expected_results()
        # The demo operates on a single site at the origin.
        site = Site(0.0, 0.0)

        run_job(self.UHS_DEMO_CONFIG)
        job = OqJob.objects.latest('id')

        uh_spectra = UhSpectra.objects.get(output__oq_job=job.id)
        self.assertEqual(1, uh_spectra.realizations)

        for poe, exp in expected.items():
            spectrum = UhSpectrum.objects.get(
                poe=poe, uh_spectra=uh_spectra.id)
            spectrum_data = UhSpectrumData.objects.get(
                uh_spectrum=spectrum.id)

            self.assertTrue(
                numpy.allclose(exp['sa_values'], spectrum_data.sa_values))
            self.assertTrue(
                numpy.allclose(exp['periods'], uh_spectra.periods))

            self.assertEqual(0, spectrum_data.realization)
            self.assertEqual(site.point.to_wkt(),
                             spectrum_data.location.wkt)
    def test_complex_fault_demo_hazard_nrml_written_once(self):
        """
        Run the `complex_fault_demo_hazard` demo and verify that the
        NRML files are written only once.
        """

        def filter_multi():
            """Filter and return files that were written more than once."""
            # `fname` rather than `file` to avoid shadowing the builtin.
            counts = defaultdict(int)
            for fname in stats.kvs_op("lrange", key, 0, -1):
                counts[fname] += 1
            return [(f, c) for f, c in counts.iteritems() if c > 1]

        job_cfg = helpers.demo_file(os.path.join(
            "complex_fault_demo_hazard", "config.gem"))

        helpers.run_job(job_cfg, output="xml")

        self.job = models.OqCalculation.objects.latest("id")

        # Check the hazard curve and hazard map NRML write counters; a
        # non-empty `key` means stats were recorded for that counter.
        # (filter_multi reads `key` from this scope at call time.)
        for stats_key in ("hcls_xmlcurvewrites", "hcls_xmlmapwrites"):
            key = stats.key_name(
                self.job.id, *stats.STATS_KEYS[stats_key])
            if key:
                multi_writes = filter_multi()
                self.assertFalse(multi_writes, str(multi_writes))
Example #3
0
    def test_run_calc_with_description(self):
        # Test importing and running a job with a config containing the
        # optional DESCRIPTION parameter.

        description = 'Classical PSHA hazard test description'

        orig_cfg_path = demo_file('PeerTestSet1Case2/config.gem')
        mod_cfg_path = os.path.join(demo_file('PeerTestSet1Case2'),
                                    'modified_config.gem')

        # Use ConfigParser to add the DESCRIPTION param to an existing config
        # profile and write a new temporary config file. `with` blocks ensure
        # both file handles are closed promptly.
        cfg_parser = ConfigParser.ConfigParser()
        with open(orig_cfg_path, 'r') as orig_fh:
            cfg_parser.readfp(orig_fh)
        cfg_parser.set('general', 'DESCRIPTION', description)
        with open(mod_cfg_path, 'w') as mod_fh:
            cfg_parser.write(mod_fh)

        try:
            run_job(mod_cfg_path)
            job = OqJob.objects.latest('id')
            job_profile = job.profile()

            self.assertEqual(description, job_profile.description)
            self.assertEqual(description, job.description)
        finally:
            # Clean up the temporary config file even if an assertion fails:
            os.unlink(mod_cfg_path)
Example #4
0
    def test_bcr(self):
        """Verify the EAL (Original and Retrofitted) and BCR values against
        hand-computed results.

        EAL values are compared with a delta of 0.0009 (3 decimal places of
        precision); BCR values with a delta of 0.009 (2 decimal places).
        """
        # site location -> {assetRef: (eal_orig, eal_retrof, bcr)}
        expected = {
            (-122.0, 38.225): {
                'a1': (0.009379, 0.006586, 0.483091),
            },
        }

        helpers.run_job(CONFIG)
        calc_record = OqJob.objects.latest("id")
        self.assertEqual('succeeded', calc_record.status)

        actual = self._parse_bcr_map(RESULT)

        try:
            self._assert_bcr_results_equal(expected, actual)
        finally:
            shutil.rmtree(COMPUTED_OUTPUT)
    def test_peer_test_set_1_case_5(self):
        """Run the PeerTestSet1Case5 demo and check its hazard curves."""
        expected = load_exp_hazcurve_results("PeerTestSet1Case5")

        config = helpers.demo_file(
            os.path.join("PeerTestSet1Case5", "config.gem"))
        helpers.run_job(config)

        self._assert_hazcurve_results_are(expected)
Example #6
0
    def test_complex_fault_demo_hazard_nrml_written_once(self):
        """
        Run the `complex_fault_demo_hazard` demo and verify that the
        NRML files are written only once.
        """

        def filter_multi():
            """Filter and return files that were written more than once."""
            # `fname` rather than `file` to avoid shadowing the builtin.
            counts = defaultdict(int)
            for fname in stats.kvs_op("lrange", key, 0, -1):
                counts[fname] += 1
            return [(f, c) for f, c in counts.iteritems() if c > 1]

        job_cfg = helpers.demo_file(os.path.join(
            "complex_fault_demo_hazard", "config.gem"))

        helpers.run_job(job_cfg, ['--output-type=xml'])

        self.job = models.OqJob.objects.latest("id")

        # Check the hazard curve and hazard map NRML write counters; a
        # non-empty `key` means stats were recorded for that counter.
        # (filter_multi reads `key` from this scope at call time.)
        for stats_key in ("hcls_xmlcurvewrites", "hcls_xmlmapwrites"):
            key = stats.key_name(
                self.job.id, *stats.STATS_KEYS[stats_key])
            if key:
                multi_writes = filter_multi()
                self.assertFalse(multi_writes, str(multi_writes))
    def test_uhs(self):
        """Kick off the engine on the UHS demo job, then query the database
        and check the computed UHS results."""
        exp_results = self._load_expected_results()
        # Single-site calculation at the origin.
        exp_site = Site(0.0, 0.0)

        run_job(self.UHS_DEMO_CONFIG)
        job = OqJob.objects.latest('id')

        uh_spectra = UhSpectra.objects.get(output__oq_job=job.id)
        self.assertEqual(1, uh_spectra.realizations)

        for poe, expected in exp_results.items():
            spectrum = UhSpectrum.objects.get(
                poe=poe, uh_spectra=uh_spectra.id)
            data = UhSpectrumData.objects.get(uh_spectrum=spectrum.id)

            self.assertTrue(
                numpy.allclose(expected['sa_values'], data.sa_values))
            self.assertTrue(
                numpy.allclose(expected['periods'], uh_spectra.periods))

            self.assertEqual(0, data.realization)
            self.assertEqual(exp_site.point.to_wkt(), data.location.wkt)
Example #8
0
    def test_peer_test_set_1_case_5(self):
        """End-to-end run of PeerTestSet1Case5 with hazard curve checks."""
        expected = load_exp_hazcurve_results("PeerTestSet1Case5")

        cfg_path = helpers.demo_file(
            os.path.join("PeerTestSet1Case5", "config.gem"))
        helpers.run_job(cfg_path)

        self._assert_hazcurve_results_are(expected)
    def test_run_calc_with_description(self):
        # Test importing and running a job with a config containing the
        # optional DESCRIPTION parameter.

        description = 'Classical PSHA hazard test description'

        orig_cfg_path = demo_file('PeerTestSet1Case2/config.gem')
        mod_cfg_path = os.path.join(demo_file('PeerTestSet1Case2'),
                                    'modified_config.gem')

        # Use ConfigParser to add the DESCRIPTION param to an existing config
        # profile and write a new temporary config file. `with` blocks ensure
        # both file handles are closed promptly.
        cfg_parser = ConfigParser.ConfigParser()
        with open(orig_cfg_path, 'r') as orig_fh:
            cfg_parser.readfp(orig_fh)
        cfg_parser.set('general', 'DESCRIPTION', description)
        with open(mod_cfg_path, 'w') as mod_fh:
            cfg_parser.write(mod_fh)

        try:
            run_job(mod_cfg_path)
            job = OqJob.objects.latest('id')
            job_profile = job.profile()

            self.assertEqual(description, job_profile.description)
            self.assertEqual(description, job.description)
        finally:
            # Clean up the temporary config file even if an assertion fails:
            os.unlink(mod_cfg_path)
    def test_hazard_map_test(self):
        """Run the HazardMapTest demo and verify the mean hazard map at
        PoE 0.1 against the expected .dat results."""
        helpers.run_job(helpers.demo_file(
            os.path.join("HazardMapTest", "config.gem")))

        self.job = models.OqCalculation.objects.latest("id")

        dat_path = helpers.demo_file(os.path.join(
            "HazardMapTest", "expected_results", "meanHazardMap0.1.dat"))

        verify_hazmap_results(self, self.job, load_expected_map(dat_path),
                              0.1, "mean")
Example #11
0
    def test_hazard_map_test(self):
        """End-to-end check of the HazardMapTest mean hazard map (PoE 0.1)."""
        config = helpers.demo_file(
            os.path.join("HazardMapTest", "config.gem"))
        helpers.run_job(config)

        self.job = models.OqJob.objects.latest("id")

        expected_map = load_expected_map(helpers.demo_file(os.path.join(
            "HazardMapTest", "expected_results", "meanHazardMap0.1.dat")))

        poe, statistic_type = 0.1, "mean"
        verify_hazmap_results(self, self.job, expected_map, poe,
                              statistic_type)
    def test_classical_psha_based_risk(self):
        """Run the full hazard+risk job, serialize all results to the db,
        and verify them against expected values."""
        # Expected loss curve PoEs, verified below with an absolute
        # tolerance of 0.0009.
        expected_lc_poes = [
            0.03944, 0.03943, 0.03857, 0.03548, 0.03123, 0.02708, 0.02346,
            0.02039, 0.01780, 0.01565, 0.01386, 0.01118, 0.00926, 0.00776,
            0.00654, 0.00555, 0.00417, 0.00338, 0.00283, 0.00231, 0.00182,
            0.00114, 0.00089, 0.00082, 0.00069, 0.00039, 0.00024, 0.00013,
            0.00006, 0.00002, 0.00001,
        ]

        helpers.run_job(helpers.demo_file(
            os.path.join('classical_psha_based_risk', 'config.gem')))

        calculation = OqCalculation.objects.latest('id')
        self.assertEqual('succeeded', calculation.status)

        loss_curve = LossCurveData.objects.get(
            loss_curve__output__oq_calculation=calculation.id)

        self.assertTrue(numpy.allclose(
            expected_lc_poes, loss_curve.poes, atol=0.0009))
Example #13
0
    def test_disagg(self):
        """Run the disaggregation demo and verify its XML and HDF5 output."""
        helpers.run_job(DISAGG_DEMO_CONFIG)

        job_record = OqJob.objects.latest("id")
        self.assertEqual('succeeded', job_record.status)

        try:
            self.assertTrue(os.path.exists(XML_OUTPUT_FILE))
            self._verify_xml_output(EXPECTED_XML_FILE, XML_OUTPUT_FILE,
                                    job_record.id)

            h5_file = H5_OUTPUT_FILE % job_record.id
            self.assertTrue(os.path.exists(h5_file))
            self._verify_h5(h5_file, job_record.profile())
        finally:
            # Clean up the job hdf5 results dir even if a check above fails,
            # so stale results cannot leak into subsequent runs:
            shutil.rmtree(H5_OUTPUT_DIR % job_record.id)
Example #14
0
    def test_disagg(self):
        """Run the disaggregation demo and verify its XML and HDF5 output."""
        helpers.run_job(DISAGG_DEMO_CONFIG)

        job_record = OqCalculation.objects.latest("id")
        self.assertEqual('succeeded', job_record.status)

        try:
            self.assertTrue(os.path.exists(XML_OUTPUT_FILE))
            self._verify_xml_output(EXPECTED_XML_FILE, XML_OUTPUT_FILE,
                                    job_record.id)

            h5_file = H5_OUTPUT_FILE % job_record.id
            self.assertTrue(os.path.exists(h5_file))
            self._verify_h5(h5_file, job_record.oq_job_profile)
        finally:
            # Clean up the job hdf5 results dir even if a check above fails,
            # so stale results cannot leak into subsequent runs:
            shutil.rmtree(H5_OUTPUT_DIR % job_record.id)
    def test_uhs_output_type_xml(self):
        # Run a calculation with --output-type=xml and check that the expected
        # result files are created in the right location.

        # This location is based on parameters in the UHS config file:
        results_target_dir = demo_file('uhs/computed_output')

        # Clear the target dir from previous demo/test runs. `ignore_errors`
        # prevents a crash on the very first run, when the directory does not
        # exist yet.
        shutil.rmtree(results_target_dir, ignore_errors=True)

        expected_export_files = [
            os.path.join(results_target_dir, 'uhs_poe:0.1.hdf5'),
            os.path.join(results_target_dir, 'uhs_poe:0.02.hdf5'),
            os.path.join(results_target_dir, 'uhs.xml'),
        ]

        # Precondition: none of the output files may exist before the run.
        for f in expected_export_files:
            self.assertFalse(os.path.exists(f))

        uhs_cfg = demo_file('uhs/config.gem')
        try:
            ret_code = run_job(uhs_cfg, ['--output-type=xml'])
            self.assertEqual(0, ret_code)

            # Check that all the output files were created:
            for f in expected_export_files:
                self.assertTrue(os.path.exists(f))
        finally:
            shutil.rmtree(results_target_dir)
Example #16
0
    def test_hazard_input_on_exposure_sites(self):
        """Compute hazard on the sites defined in the exposure file and
        check that the job finishes successfully."""
        config = helpers.demo_file("scenario_risk/config_hzr_exposure.gem")

        self.assertEquals(0, helpers.run_job(config, ["--output-type=xml"]))
        self.assertEqual("succeeded", OqJob.objects.latest("id").status)
Example #17
0
    def test_uhs_output_type_xml(self):
        # Run a calculation with --output-type=xml and check that the expected
        # result files are created in the right location.

        # This location is based on parameters in the UHS config file:
        results_target_dir = demo_file('uhs/computed_output')

        # Clear the target dir from previous demo/test runs. `ignore_errors`
        # prevents a crash on the very first run, when the directory does not
        # exist yet.
        shutil.rmtree(results_target_dir, ignore_errors=True)

        expected_export_files = [
            os.path.join(results_target_dir, 'uhs_poe:0.1.hdf5'),
            os.path.join(results_target_dir, 'uhs_poe:0.02.hdf5'),
            os.path.join(results_target_dir, 'uhs.xml'),
        ]

        # Precondition: none of the output files may exist before the run.
        for f in expected_export_files:
            self.assertFalse(os.path.exists(f))

        uhs_cfg = demo_file('uhs/config.gem')
        try:
            ret_code = run_job(uhs_cfg, ['--output-type=xml'])
            self.assertEqual(0, ret_code)

            # Check that all the output files were created:
            for f in expected_export_files:
                self.assertTrue(os.path.exists(f))
        finally:
            shutil.rmtree(results_target_dir)
    def test_hazard_input_on_exposure_sites(self):
        # Run a scenario-risk job whose hazard is computed on the
        # exposure-file sites; it must exit cleanly and be marked succeeded.
        exit_code = helpers.run_job(
            helpers.demo_file("scenario_risk/config_hzr_exposure.gem"),
            ["--output-type=xml"])

        self.assertEquals(0, exit_code)
        self.assertEqual("succeeded", OqJob.objects.latest("id").status)
    def test_bcr_event_based(self):
        """First implementation of the QA test for the event based BCR
        calculator: run it end-to-end to make sure it doesn't blow up."""
        cfg = '%s/config_ebased.gem' % BCR_DEMO_BASE
        exit_code = helpers.run_job(cfg, ['--output-type=xml'])

        self._verify_job_succeeded(exit_code)
Example #20
0
    def test_bcr_event_based(self):
        # End-to-end smoke test for the event based BCR calculator: just
        # make sure the job runs to completion.
        self._verify_job_succeeded(
            helpers.run_job('%s/config_ebased.gem' % BCR_DEMO_BASE,
                            ['--output-type=xml']))
    def test_complex_fault_demo_hazard_nrml(self):
        """
        Run the `complex_fault_demo_hazard` demo and verify all of the
        generated NRML data.
        """
        job_cfg = helpers.demo_file(os.path.join(
            "complex_fault_demo_hazard", "config.gem"))

        # Directory (relative to the demo files root) holding the expected
        # .dat results the NRML output is compared against:
        exp_results_dir = os.path.join("complex_fault_demo_hazard",
                                       "expected_results")

        helpers.run_job(job_cfg, output="xml")

        self.job = models.OqCalculation.objects.latest("id")

        # The job writes its NRML output files under `computed_output`:
        copath = helpers.demo_file(os.path.join(
            "complex_fault_demo_hazard", "computed_output"))

        try:
            # Check hazard curves for sample 0:
            # Hazard curve expected results for logic tree sample 0:
            hazcurve_0 = helpers.demo_file(
                os.path.join(exp_results_dir, "hazardcurve-0.dat"))
            nrml_path = os.path.join(copath, "hazardcurve-0.xml")
            verify_hazcurve_nrml(self, nrml_path, hazcurve_0)

            # Check mean hazard curves:
            hazcurve_mean = helpers.demo_file(
                os.path.join(exp_results_dir, "hazardcurve-mean.dat"))
            nrml_path = os.path.join(copath, "hazardcurve-mean.xml")
            verify_hazcurve_nrml(self, nrml_path, hazcurve_mean)

            # Check hazard map mean 0.02:
            hazmap_mean_0_02 = helpers.demo_file(
                os.path.join(exp_results_dir, "hazardmap-0.02-mean.dat"))
            nrml_path = os.path.join(copath, "hazardmap-0.02-mean.xml")
            verify_hazmap_nrml(self, nrml_path, hazmap_mean_0_02)

            # Check hazard map mean 0.1:
            hazmap_mean_0_1 = helpers.demo_file(
                os.path.join(exp_results_dir, "hazardmap-0.1-mean.dat"))
            nrml_path = os.path.join(copath, "hazardmap-0.1-mean.xml")
            verify_hazmap_nrml(self, nrml_path, hazmap_mean_0_1)
        finally:
            # Always remove the computed output so later runs start clean.
            shutil.rmtree(copath)
Example #22
0
    def test_complex_fault_demo_hazard_nrml(self):
        """
        Run the `complex_fault_demo_hazard` demo and verify all of the
        generated NRML data.
        """
        job_cfg = helpers.demo_file(
            os.path.join("complex_fault_demo_hazard", "config.gem"))

        # Directory (relative to the demo files root) holding the expected
        # .dat results the NRML output is compared against:
        exp_results_dir = os.path.join("complex_fault_demo_hazard",
                                       "expected_results")

        helpers.run_job(job_cfg, ['--output-type=xml'])

        self.job = models.OqJob.objects.latest("id")

        # The job writes its NRML output files under `computed_output`:
        copath = helpers.demo_file(
            os.path.join("complex_fault_demo_hazard", "computed_output"))

        try:
            # Check hazard curves for sample 0:
            # Hazard curve expected results for logic tree sample 0:
            hazcurve_0 = helpers.demo_file(
                os.path.join(exp_results_dir, "hazardcurve-0.dat"))
            nrml_path = os.path.join(copath, "hazardcurve-0.xml")
            verify_hazcurve_nrml(self, nrml_path, hazcurve_0)

            # Check mean hazard curves:
            hazcurve_mean = helpers.demo_file(
                os.path.join(exp_results_dir, "hazardcurve-mean.dat"))
            nrml_path = os.path.join(copath, "hazardcurve-mean.xml")
            verify_hazcurve_nrml(self, nrml_path, hazcurve_mean)

            # Check hazard map mean 0.02:
            hazmap_mean_0_02 = helpers.demo_file(
                os.path.join(exp_results_dir, "hazardmap-0.02-mean.dat"))
            nrml_path = os.path.join(copath, "hazardmap-0.02-mean.xml")
            verify_hazmap_nrml(self, nrml_path, hazmap_mean_0_02)

            # Check hazard map mean 0.1:
            hazmap_mean_0_1 = helpers.demo_file(
                os.path.join(exp_results_dir, "hazardmap-0.1-mean.dat"))
            nrml_path = os.path.join(copath, "hazardmap-0.1-mean.xml")
            verify_hazmap_nrml(self, nrml_path, hazmap_mean_0_1)
        finally:
            # Always remove the computed output so later runs start clean.
            shutil.rmtree(copath)
Example #23
0
    def test_bcr_event_hazard_on_exposure_sites(self):
        """Compute the hazard on the locations defined in the exposure
        file; for now, just check that the job completes correctly."""
        cfg = '%s/config_ebased_hzr_exposure.gem' % BCR_DEMO_BASE

        self._verify_job_succeeded(
            helpers.run_job(cfg, ['--output-type=xml']))
    def test_scenario_risk_sample_based(self):
        # This QA test is a longer-running test of the Scenario Risk
        # calculator.

        # The vulnerability model has non-zero Coefficients of Variation and
        # therefore exercises the 'sample-based' path through the calculator.
        # This test is configured to produce 1000 ground motion fields at each
        # location of interest (in the test above, only 10 are produced).

        # Since we're seeding the random epsilon sampling, we can consistently
        # reproduce all result values.

        # When these values are compared to the results computed by a similar
        # config which takes the 'mean-based' path (with CoVs = 0), we expect
        # the following:
        # All of the mean values in the 'sample-based' results should be
        # within 5%, + or -, of the 'mean-based' results.
        # The standard deviation values of the 'sample-based' results should
        # simply be greater than those produced with the 'mean-based' method.

        # For comparison, mean and stddev values for the region were computed
        # with 1000 GMFs using the mean-based approach. These values (rounded
        # to 2 decimal places) are:
        mb_mean_loss = 1233.26
        mb_stddev_loss = 443.63
        # Loss map for the mean-based approach:
        mb_loss_map = [
            dict(asset='a3', pos='15.48 38.25', mean=200.54874638,
                stddev=94.2302991022),
            dict(asset='a2', pos='15.56 38.17', mean=510.821363253,
                stddev=259.964152622),
            dict(asset='a1', pos='15.48 38.09', mean=521.885458891,
                stddev=244.825980356),
        ]

        # Sanity checks are done. Let's do this.
        scen_cfg = helpers.demo_file(
            'scenario_risk/config_sample-based_qa.gem')
        # check_output=True captures the shell output, which carries the
        # computed region mean/stddev loss values used below:
        result = helpers.run_job(scen_cfg, ['--output-type=xml'],
            check_output=True)

        job = OqJob.objects.latest('id')
        self.assertEqual('succeeded', job.status)

        expected_loss_map_file = helpers.demo_file(
            'scenario_risk/computed_output/loss-map-%s.xml' % job.id)
        self.assertTrue(os.path.exists(expected_loss_map_file))

        loss_map = helpers.loss_map_result_from_file(expected_loss_map_file)
        # Per-asset means must fall within +/- 5% of the mean-based values:
        self._verify_loss_map_within_range(sorted(mb_loss_map),
            sorted(loss_map), 0.05)

        exp_mean_loss, exp_stddev_loss = helpers.mean_stddev_from_result_line(
            result)
        self.assertAlmostEqual(mb_mean_loss, exp_mean_loss,
            delta=mb_mean_loss * 0.05)
        # Sampling adds variance, so the sample-based stddev must exceed the
        # mean-based stddev:
        self.assertTrue(exp_stddev_loss > mb_stddev_loss)
Example #25
0
    def test_log_file_access_denied(self):
        """Logging to a location the user cannot write to ('/', for
        example) must produce a clear permission-denied error message."""
        cfg = helpers.demo_file('uhs/config.gem')

        output = helpers.run_job(cfg, ['--log-file', '/oq.log'],
                                 check_output=True)

        self.assertEqual(
            'Error writing to log file /oq.log: Permission denied\n', output)
    def test_bcr_event_hazard_on_exposure_sites(self):
        # Event-based BCR with hazard computed on the exposure-file
        # locations. Only job completion is verified at this point.
        exit_code = helpers.run_job(
            '%s/config_ebased_hzr_exposure.gem' % BCR_DEMO_BASE,
            ['--output-type=xml'])

        self._verify_job_succeeded(exit_code)
Example #27
0
    def test_scenario_risk(self):
        # This test exercises the 'mean-based' path through the Scenario Risk
        # calculator. There is no random sampling done here so the results are
        # 100% predictable.
        scen_cfg = helpers.demo_file('scenario_risk/config.gem')

        # Expected aggregate (region) loss statistics:
        exp_mean_loss = 1053.09
        exp_stddev_loss = 246.62
        # Expected per-asset loss map entries (mean and stddev per asset):
        expected_loss_map = [
            dict(asset='a3',
                 pos='15.48 38.25',
                 mean=180.717534009275,
                 stddev=92.2122644809969),
            dict(asset='a2',
                 pos='15.56 38.17',
                 mean=432.225448142534,
                 stddev=186.864456949986),
            dict(asset='a1',
                 pos='15.48 38.09',
                 mean=440.147078317589,
                 stddev=182.615976701858),
        ]

        # check_output=True captures the calculator's shell output for the
        # mean/stddev assertions below.
        result = helpers.run_job(scen_cfg, ['--output-type=xml'],
                                 check_output=True)

        job = OqJob.objects.latest('id')
        self.assertEqual('succeeded', job.status)

        expected_loss_map_file = helpers.demo_file(
            'scenario_risk/computed_output/loss-map-%s.xml' % job.id)

        self.assertTrue(os.path.exists(expected_loss_map_file))

        self._verify_loss_map(expected_loss_map_file, expected_loss_map)

        # We expected the shell output to look something like the following
        # two lines:
        # Mean region loss value: 1053.09
        # Standard deviation region loss value: 246.62

        # split on newline and filter out empty lines
        result = [line for line in result.split('\n') if len(line) > 0]

        # we expect 2 lines; 1 for mean, 1 for stddev
        self.assertEqual(2, len(result))

        # The loss value is the last whitespace-separated token on each line.
        actual_mean = float(result[0].split()[-1])
        actual_stddev = float(result[1].split()[-1])

        self.assertAlmostEqual(exp_mean_loss,
                               actual_mean,
                               places=self.TOTAL_LOSS_PRECISION)
        self.assertAlmostEqual(exp_stddev_loss,
                               actual_stddev,
                               places=self.TOTAL_LOSS_PRECISION)
    def test_scenario_risk(self):
        """Rudimentary end-to-end smoke test for the scenario calculator:
        just make sure it doesn't blow up."""
        exit_code = helpers.run_job(
            helpers.demo_file('scenario_risk/config.gem'),
            ['--output-type=xml'])
        self.assertEqual(0, exit_code)

        self.assertEqual('succeeded', OqJob.objects.latest('id').status)
    def test_log_file_access_denied(self):
        # Attempting to log to '/oq.log' (no write access for the user)
        # should fail with a permission-denied message.
        result = helpers.run_job(helpers.demo_file('uhs/config.gem'),
                                 ['--log-file', '/oq.log'],
                                 check_output=True)

        expected = 'Error writing to log file /oq.log: Permission denied\n'
        self.assertEqual(expected, result)
Example #30
0
    def test_export_uhs(self):
        # Tests the UHS calculation run and export end-to-end.
        # For the export, we only check the quantity, location, and names of
        # each exported file. We don't check the contents; that's covered in
        # other tests.
        uhs_cfg = helpers.demo_file('uhs/config.gem')
        export_target_dir = tempfile.mkdtemp()

        expected_export_files = [
            os.path.join(export_target_dir, 'uhs_poe:0.1.hdf5'),
            os.path.join(export_target_dir, 'uhs_poe:0.02.hdf5'),
            os.path.join(export_target_dir, 'uhs.xml'),
        ]

        # Sanity check and precondition: these files should not exist yet
        for f in expected_export_files:
            self.assertFalse(os.path.exists(f))

        try:
            ret_code = helpers.run_job(uhs_cfg)
            self.assertEqual(0, ret_code)

            job = models.OqJob.objects.latest('id')
            # Exactly one output record is expected for this job:
            [output] = models.Output.objects.filter(oq_job=job.id)

            # Split into a list, 1 result for each row in the output.
            # The first row of output (the table header) is discarded.
            listed_calcs = helpers.prepare_cli_output(
                subprocess.check_output(
                    ['bin/openquake', '--list-calculations']))

            check_list_calcs(self, listed_calcs, job.id)

            listed_outputs = helpers.prepare_cli_output(
                subprocess.check_output(
                    ['bin/openquake', '--list-outputs',
                     str(job.id)]))

            check_list_outputs(self, listed_outputs, output.id, 'uh_spectra')

            # Export the single output and capture the list of files the
            # CLI reports as written:
            listed_exports = helpers.prepare_cli_output(
                subprocess.check_output([
                    'bin/openquake', '--export',
                    str(output.id), export_target_dir
                ]))

            self.assertEqual(expected_export_files, listed_exports)

            # Check that the files actually have been created,
            # and also verify that the paths are absolute:
            for f in listed_exports:
                self.assertTrue(os.path.exists(f))
                self.assertTrue(os.path.isabs(f))
        finally:
            # Always remove the temporary export directory.
            shutil.rmtree(export_target_dir)
Example #31
0
    def test_bcr_event_based(self):
        # QA smoke test for the event based BCR calculator: run it
        # end-to-end and check both the exit code and the job status.
        exit_code = helpers.run_job('%s/config_ebased.gem' % BCR_DEMO_BASE,
                                    ['--output-type=xml'])
        self.assertEqual(0, exit_code)

        self.assertEqual('succeeded', OqJob.objects.latest('id').status)
    def test_export_uhs(self):
        # Tests the UHS calculation run and export end-to-end.
        # For the export, we only check the quantity, location, and names of
        # each exported file. We don't check the contents; that's covered in
        # other tests.
        uhs_cfg = helpers.demo_file('uhs/config.gem')
        export_target_dir = tempfile.mkdtemp()

        expected_export_files = [
            os.path.join(export_target_dir, 'uhs_poe:0.1.hdf5'),
            os.path.join(export_target_dir, 'uhs_poe:0.02.hdf5'),
            os.path.join(export_target_dir, 'uhs.xml'),
        ]

        # Sanity check and precondition: these files should not exist yet
        for f in expected_export_files:
            self.assertFalse(os.path.exists(f))

        try:
            ret_code = helpers.run_job(uhs_cfg)
            self.assertEqual(0, ret_code)

            job = models.OqJob.objects.latest('id')
            # Exactly one output record is expected for this job:
            [output] = models.Output.objects.filter(
                oq_job=job.id)

            # Split into a list, 1 result for each row in the output.
            # The first row of output (the table header) is discarded.
            listed_calcs = helpers.prepare_cli_output(subprocess.check_output(
                ['openquake/bin/oqscript.py', '--list-calculations']))

            check_list_calcs(self, listed_calcs, job.id)

            listed_outputs = helpers.prepare_cli_output(
                subprocess.check_output(
                    ['openquake/bin/oqscript.py', '--list-outputs',
                     str(job.id)]))

            check_list_outputs(self, listed_outputs, output.id, 'uh_spectra')

            # Export the single output and capture the list of files the
            # CLI reports as written:
            listed_exports = helpers.prepare_cli_output(
                subprocess.check_output(
                    ['openquake/bin/oqscript.py', '--export',
                     str(output.id), export_target_dir]))

            self.assertEqual(expected_export_files, listed_exports)

            # Check that the files actually have been created,
            # and also verify that the paths are absolute:
            for f in listed_exports:
                self.assertTrue(os.path.exists(f))
                self.assertTrue(os.path.isabs(f))
        finally:
            # Always remove the temporary export directory.
            shutil.rmtree(export_target_dir)
    def test_probabilistic_risk(self):
        """Rudimentary smoke test for the probabilistic event-based risk
        calculator: run it end-to-end and verify it completes."""
        cfg = helpers.demo_file(
            'probabilistic_event_based_risk/config_stest.gem')

        self.assertEqual(0, helpers.run_job(cfg, ['--output-type=xml']))
        self.assertEqual('succeeded', OqJob.objects.latest('id').status)
Example #34
0
    def test_complex_fault_demo_hazard(self):
        """Run the `complex_fault_demo_hazard` demo and check every hazard
        curve and hazard map result against the stored expected data."""
        config = helpers.demo_file(
            os.path.join("complex_fault_demo_hazard", "config.gem"))
        results_dir = os.path.join("complex_fault_demo_hazard",
                                   "expected_results")

        helpers.run_job(config)

        self.job = models.OqJob.objects.latest("id")

        # Hazard curves for logic tree sample 0:
        verify_hazcurve_results(
            self, self.job,
            helpers.demo_file(os.path.join(results_dir, "hazardcurve-0.dat")),
            end_branch_label=0)

        # Mean hazard curves:
        verify_hazcurve_results(
            self, self.job,
            helpers.demo_file(
                os.path.join(results_dir, "hazardcurve-mean.dat")),
            statistic_type="mean")

        # Mean hazard maps at PoE 0.02 and 0.1:
        for poe, dat_name in ((0.02, "hazardmap-0.02-mean.dat"),
                              (0.1, "hazardmap-0.1-mean.dat")):
            expected_map = helpers.demo_file(
                os.path.join(results_dir, dat_name))
            verify_hazmap_results(
                self, self.job, load_expected_map(expected_map), poe, "mean")
Example #35
0
    def test_scenario_risk_insured_losses(self):
        # Exercises the 'mean-based' path through the Scenario Risk
        # calculator; no random sampling is involved, so every value is
        # fully deterministic.
        cfg_path = helpers.qa_file('scenario_risk_insured_losses/config.gem')

        expected_mean = 799.102578
        expected_stddev = 382.148808
        exp_loss_map = [
            dict(asset='a3', pos='15.48 38.25',
                 mean=156.750910806, stddev=100.422061776),
            dict(asset='a2', pos='15.56 38.17',
                 mean=314.859579324, stddev=293.976254984),
            dict(asset='a1', pos='15.48 38.09',
                 mean=327.492087529, stddev=288.47906994),
        ]

        output = helpers.run_job(cfg_path, ['--output-type=xml'],
                                 check_output=True)

        job = OqJob.objects.latest('id')
        self.assertEqual('succeeded', job.status)

        loss_map_path = helpers.qa_file(
            'scenario_risk_insured_losses/computed_output/insured-loss-map%s'
            '.xml' % job.id)
        self.assertTrue(os.path.exists(loss_map_path))

        helpers.verify_loss_map(self, loss_map_path, exp_loss_map,
                                self.LOSSMAP_PRECISION)

        # The aggregate mean/stddev are printed on the job's stdout; parse
        # them from the captured output and compare.
        mean, stddev = helpers.mean_stddev_from_result_line(output)
        self.assertAlmostEqual(expected_mean, mean,
                               places=self.TOTAL_LOSS_PRECISION)
        self.assertAlmostEqual(expected_stddev, stddev,
                               places=self.TOTAL_LOSS_PRECISION)

        # Cleaning generated results file.
        rmtree(QA_OUTPUT_DIR)
    def test_complex_fault_demo_hazard(self):
        """Run the `complex_fault_demo_hazard` demo and verify all of the
        resulting hazard curve and hazard map data."""
        job_cfg = helpers.demo_file(os.path.join(
            "complex_fault_demo_hazard", "config.gem"))

        # Directory (relative to the demo tree) holding the reference data
        # files that computed results are compared against.
        exp_results_dir = os.path.join("complex_fault_demo_hazard",
                                       "expected_results")

        helpers.run_job(job_cfg)

        # NOTE(review): this variant reads `OqCalculation` where sibling
        # tests use `OqJob` — presumably an older model name; confirm.
        self.job = models.OqCalculation.objects.latest("id")

        # Check hazard curves for sample 0:
        # Hazard curve expected results for logic tree sample 0:
        hazcurve_0 = helpers.demo_file(os.path.join(exp_results_dir,
                                                     "hazardcurve-0.dat"))
        verify_hazcurve_results(
            self, self.job, hazcurve_0, end_branch_label=0)

        # Check mean hazard curves:
        hazcurve_mean = helpers.demo_file(os.path.join(exp_results_dir,
                                                       "hazardcurve-mean.dat"))
        verify_hazcurve_results(
            self, self.job, hazcurve_mean, statistic_type="mean")

        # Check hazard map mean 0.02:
        hazmap_mean_0_02 = helpers.demo_file(
            os.path.join(exp_results_dir, "hazardmap-0.02-mean.dat"))
        verify_hazmap_results(
            self, self.job, load_expected_map(hazmap_mean_0_02), 0.02, "mean")

        # Check hazard map mean 0.1:
        hazmap_mean_0_1 = helpers.demo_file(
            os.path.join(exp_results_dir, "hazardmap-0.1-mean.dat"))
        verify_hazmap_results(
            self, self.job, load_expected_map(hazmap_mean_0_1), 0.1, "mean")
Example #37
0
    def test_full_calc_user_assoc(self):
        # End-to-end ownership check: after a full calculation run (the
        # same way other QA tests use `subprocess` to invoke
        # bin/openquake), the current OS user must have an OqUser record
        # and own the job profile, the job itself, and every output.
        config = demo_file('uhs/config.gem')

        run_job(config)

        # Look up the OqUser record matching the current OS user.
        current_user = OqUser.objects.get(user_name=getpass.getuser())

        latest_job = OqJob.objects.latest('id')

        self.assertEqual(current_user, latest_job.owner)
        self.assertEqual(current_user, latest_job.profile().owner)

        job_outputs = Output.objects.filter(oq_job=latest_job.id)
        # Without at least one output record this test verifies nothing.
        self.assertTrue(len(job_outputs) > 0)
        for out in job_outputs:
            self.assertEqual(current_user, out.owner)
    def test_full_calc_user_assoc(self):
        """Verify OqUser ownership associations after a full calculation."""
        # Run a full calculation in the same way as other QA tests (using
        # `subprocess` to invoke bin/oqscript.py) and check the following:
        # 1. There is an OqUser record for the current user.
        # 2. This user is the owner of all OqJobProfile, OqJob,
        #    and Output records.
        cfg_path = demo_file('uhs/config.gem')

        run_job(cfg_path)

        # Get the OqUser for the current user
        user = OqUser.objects.get(user_name=getpass.getuser())

        job = OqJob.objects.latest('id')
        job_profile = job.profile()

        self.assertEqual(user, job.owner)
        self.assertEqual(user, job_profile.owner)

        outputs = Output.objects.filter(oq_job=job.id)
        # We need at least 1 output record, otherwise this test is useless:
        self.assertTrue(len(outputs) > 0)
        for output in outputs:
            self.assertEqual(user, output.owner)
Example #39
0
    def test_log_file(self):
        # Test logging to a file when running bin/oqscript.py.
        # Verifies that the job exits cleanly, that the requested log file
        # is created, and that it actually received some content.
        uhs_cfg = helpers.demo_file('uhs/config.gem')

        log_file = './%s.log' % helpers.random_string()
        self.assertFalse(os.path.exists(log_file))

        try:
            ret_code = helpers.run_job(
                uhs_cfg, ['--log-level', 'debug', '--log-file', log_file])
            self.assertEqual(0, ret_code)

            self.assertTrue(os.path.exists(log_file))
            # Make sure there is something in it.
            self.assertTrue(os.path.getsize(log_file) > 0)
        finally:
            # Remove the log file even if an assertion above fails, so a
            # failed run doesn't leave a stale file behind for the next one.
            if os.path.exists(log_file):
                os.unlink(log_file)
Example #40
0
    def test_scenario_risk(self):
        # Exercises the 'mean-based' path through the Scenario Risk
        # calculator; with no random sampling involved, the results are
        # fully deterministic.
        cfg_path = helpers.demo_file('scenario_risk/config.gem')

        expected_mean = 1053.09
        expected_stddev = 246.62
        exp_loss_map = [
            dict(asset='a3', pos='15.48 38.25',
                 mean=180.717534009275, stddev=92.2122644809969),
            dict(asset='a2', pos='15.56 38.17',
                 mean=432.225448142534, stddev=186.864456949986),
            dict(asset='a1', pos='15.48 38.09',
                 mean=440.147078317589, stddev=182.615976701858),
        ]

        output = helpers.run_job(cfg_path, ['--output-type=xml'],
                                 check_output=True)

        job = OqJob.objects.latest('id')
        self.assertEqual('succeeded', job.status)

        loss_map_path = helpers.demo_file(
            'scenario_risk/computed_output/loss-map-%s.xml' % job.id)
        self.assertTrue(os.path.exists(loss_map_path))

        helpers.verify_loss_map(self, loss_map_path, exp_loss_map,
                                self.LOSSMAP_PRECISION)

        # The aggregate mean/stddev are printed on the job's stdout; parse
        # them from the captured output and compare.
        mean, stddev = helpers.mean_stddev_from_result_line(output)
        self.assertAlmostEqual(expected_mean, mean,
                               places=self.TOTAL_LOSS_PRECISION)
        self.assertAlmostEqual(expected_stddev, stddev,
                               places=self.TOTAL_LOSS_PRECISION)
Example #41
0
    def test_export_agg_loss_curve(self):
        """Run the small event-based risk demo, then drive the CLI to list
        the calculation and its outputs and export the aggregate loss
        curve to a temporary directory."""
        eb_cfg = helpers.get_data_path(
            'demos/event_based_risk_small/config.gem')
        export_target_dir = tempfile.mkdtemp()

        expected_export_files = [
            os.path.join(export_target_dir, 'aggregate_loss_curve.xml'),
        ]

        try:
            ret_code = helpers.run_job(eb_cfg)
            self.assertEqual(0, ret_code)

            job = models.OqJob.objects.latest('id')
            # Exactly one aggregate loss curve output is expected.
            [output
             ] = models.Output.objects.filter(oq_job=job.id,
                                              output_type='agg_loss_curve')

            # NOTE(review): this variant invokes `bin/openquake` where
            # sibling tests use `openquake/bin/oqscript.py` — presumably
            # an older CLI path; confirm which one is current.
            listed_calcs = helpers.prepare_cli_output(
                subprocess.check_output(
                    ['bin/openquake', '--list-calculations']))

            check_list_calcs(self, listed_calcs, job.id)

            listed_outputs = helpers.prepare_cli_output(
                subprocess.check_output(
                    ['bin/openquake', '--list-outputs',
                     str(job.id)]))

            check_list_outputs(self, listed_outputs, output.id,
                               'agg_loss_curve')

            listed_exports = helpers.prepare_cli_output(
                subprocess.check_output([
                    'bin/openquake', '--export',
                    str(output.id), export_target_dir
                ]))

            self.assertEqual(expected_export_files, listed_exports)
        finally:
            shutil.rmtree(export_target_dir)
    def test_scenario_risk_insured_losses(self):
        # This test exercises the 'mean-based' path through the Scenario Risk
        # calculator. There is no random sampling done here so the results are
        # 100% predictable.
        scen_cfg = helpers.qa_file('scenario_risk_insured_losses/config.gem')

        # Expected aggregate insured loss statistics and per-asset loss map.
        exp_mean_loss = 799.102578
        exp_stddev_loss = 382.148808
        expected_loss_map = [
            dict(asset='a3', pos='15.48 38.25', mean=156.750910806,
                stddev=100.422061776),
            dict(asset='a2', pos='15.56 38.17', mean=314.859579324,
                stddev=293.976254984),
            dict(asset='a1', pos='15.48 38.09', mean=327.492087529,
                stddev=288.47906994),
            ]

        # `check_output=True` captures the job's stdout so the aggregate
        # result line can be parsed below.
        result = helpers.run_job(scen_cfg, ['--output-type=xml'],
            check_output=True)

        job = OqJob.objects.latest('id')
        self.assertEqual('succeeded', job.status)

        expected_loss_map_file = helpers.qa_file(
            'scenario_risk_insured_losses/computed_output/insured-loss-map%s'
            '.xml' % job.id)

        self.assertTrue(os.path.exists(expected_loss_map_file))

        helpers.verify_loss_map(self, expected_loss_map_file,
            expected_loss_map, self.LOSSMAP_PRECISION)

        actual_mean, actual_stddev = helpers.mean_stddev_from_result_line(result)

        self.assertAlmostEqual(
            exp_mean_loss, actual_mean, places=self.TOTAL_LOSS_PRECISION)
        self.assertAlmostEqual(
            exp_stddev_loss, actual_stddev, places=self.TOTAL_LOSS_PRECISION)

        # Cleaning generated results file.
        rmtree(QA_OUTPUT_DIR)
    def test_export_agg_loss_curve(self):
        # Run the small event-based risk demo, then drive the CLI to list
        # the calculation and its outputs and export the aggregate loss
        # curve into a temporary directory.
        cfg = helpers.get_data_path(
            'demos/event_based_risk_small/config.gem')
        target_dir = tempfile.mkdtemp()

        expected_files = [
            os.path.join(target_dir, 'aggregate_loss_curve.xml'),
        ]

        try:
            self.assertEqual(0, helpers.run_job(cfg))

            job = models.OqJob.objects.latest('id')
            # Exactly one aggregate loss curve output is expected.
            [agg_output] = models.Output.objects.filter(
                oq_job=job.id, output_type='agg_loss_curve')

            calcs = helpers.prepare_cli_output(subprocess.check_output(
                ['openquake/bin/oqscript.py', '--list-calculations']))
            check_list_calcs(self, calcs, job.id)

            outputs = helpers.prepare_cli_output(subprocess.check_output(
                ['openquake/bin/oqscript.py', '--list-outputs',
                 str(job.id)]))
            check_list_outputs(self, outputs, agg_output.id,
                               'agg_loss_curve')

            exports = helpers.prepare_cli_output(subprocess.check_output(
                ['openquake/bin/oqscript.py', '--export',
                 str(agg_output.id), target_dir]))
            self.assertEqual(expected_files, exports)
        finally:
            shutil.rmtree(target_dir)
    def test_scenario_risk(self):
        # This test exercises the 'mean-based' path through the Scenario Risk
        # calculator. There is no random sampling done here so the results are
        # 100% predictable.
        scen_cfg = helpers.demo_file('scenario_risk/config.gem')

        # Expected aggregate loss statistics and per-asset loss map.
        exp_mean_loss = 1053.09
        exp_stddev_loss = 246.62
        expected_loss_map = [
            dict(asset='a3', pos='15.48 38.25', mean=180.717534009275,
                 stddev=92.2122644809969),
            dict(asset='a2', pos='15.56 38.17', mean=432.225448142534,
                 stddev=186.864456949986),
            dict(asset='a1', pos='15.48 38.09', mean=440.147078317589,
                 stddev=182.615976701858),
        ]

        # `check_output=True` captures the job's stdout so the aggregate
        # result line can be parsed below.
        result = helpers.run_job(scen_cfg, ['--output-type=xml'],
                                 check_output=True)

        job = OqJob.objects.latest('id')
        self.assertEqual('succeeded', job.status)

        expected_loss_map_file = helpers.demo_file(
            'scenario_risk/computed_output/loss-map-%s.xml' % job.id)

        self.assertTrue(os.path.exists(expected_loss_map_file))

        helpers.verify_loss_map(self, expected_loss_map_file,
            expected_loss_map, self.LOSSMAP_PRECISION)

        actual_mean, actual_stddev = helpers.mean_stddev_from_result_line(result)

        self.assertAlmostEqual(
            exp_mean_loss, actual_mean, places=self.TOTAL_LOSS_PRECISION)
        self.assertAlmostEqual(
            exp_stddev_loss, actual_stddev, places=self.TOTAL_LOSS_PRECISION)
Example #45
0
    def test_export_dmg_distributions(self):
        """Run the scenario damage demo, then check via the CLI that each
        damage distribution output (per asset, per taxonomy, total) and
        the collapse map are listed and export to the expected files."""
        cfg = helpers.demo_file("scenario_damage_risk/config.gem")
        export_target_dir = tempfile.mkdtemp()

        try:
            ret_code = helpers.run_job(cfg)
            self.assertEqual(0, ret_code)

            job = models.OqJob.objects.latest("id")

            def single_output(output_type):
                """Return the single Output record of `output_type` for
                this job (fails if there is not exactly one)."""
                [output] = models.Output.objects.filter(
                    oq_job=job.id, output_type=output_type)
                return output

            oasset = single_output("dmg_dist_per_asset")
            otaxon = single_output("dmg_dist_per_taxonomy")
            ototal = single_output("dmg_dist_total")
            omap = single_output("collapse_map")

            calcs = helpers.prepare_cli_output(subprocess.check_output(
                ["openquake/bin/oqscript.py", "--list-calculations"]))

            # we have the calculation...
            check_list_calcs(self, calcs, job.id)

            outputs = helpers.prepare_cli_output(
                subprocess.check_output(
                    ["openquake/bin/oqscript.py", "--list-outputs",
                     str(job.id)]))

            # the damage distributions and collapse map as output...
            check_list_outputs(self, outputs, oasset.id, "dmg_dist_per_asset")
            check_list_outputs(self, outputs, ototal.id, "dmg_dist_total")
            check_list_outputs(self, outputs, omap.id, "collapse_map")
            check_list_outputs(self, outputs, otaxon.id,
                    "dmg_dist_per_taxonomy")

            # ... and each output exports to the expected file name.
            for output, fname_prefix in ((oasset, "dmg-dist-asset"),
                                         (otaxon, "dmg-dist-taxonomy"),
                                         (ototal, "dmg-dist-total"),
                                         (omap, "collapse-map")):
                exports = helpers.prepare_cli_output(
                    subprocess.check_output(
                        ["openquake/bin/oqscript.py", "--export",
                         str(output.id), export_target_dir]))

                expected_file = os.path.join(
                    export_target_dir, "%s-%s.xml" % (fname_prefix, job.id))

                self.assertEqual([expected_file], exports)
        finally:
            shutil.rmtree(export_target_dir)
    def test_export_dmg_distributions(self):
        """Run the scenario damage demo, then check via the CLI that each
        damage distribution output (per asset, per taxonomy, total) and
        the collapse map are listed and export to the expected files.

        NOTE(review): this variant invokes `bin/openquake` where sibling
        tests use `openquake/bin/oqscript.py` — presumably an older CLI
        path; confirm which one is current.
        """
        cfg = helpers.demo_file("scenario_damage_risk/config.gem")
        export_target_dir = tempfile.mkdtemp()

        try:
            ret_code = helpers.run_job(cfg)
            self.assertEqual(0, ret_code)

            job = models.OqJob.objects.latest("id")

            # Exactly one output of each type is expected for this job.
            [oasset] = models.Output.objects.filter(
                oq_job=job.id, output_type="dmg_dist_per_asset")

            [otaxon] = models.Output.objects.filter(
                oq_job=job.id, output_type="dmg_dist_per_taxonomy")

            [ototal] = models.Output.objects.filter(
                oq_job=job.id, output_type="dmg_dist_total")

            [omap] = models.Output.objects.filter(
                oq_job=job.id, output_type="collapse_map")

            calcs = helpers.prepare_cli_output(subprocess.check_output(
                ["bin/openquake", "--list-calculations"]))

            # we have the calculation...
            check_list_calcs(self, calcs, job.id)

            outputs = helpers.prepare_cli_output(
                subprocess.check_output(["bin/openquake", "--list-outputs",
                str(job.id)]))

            # the damage distributions and collapse map as output...
            check_list_outputs(self, outputs, oasset.id, "dmg_dist_per_asset")
            check_list_outputs(self, outputs, ototal.id, "dmg_dist_total")
            check_list_outputs(self, outputs, omap.id, "collapse_map")
            check_list_outputs(self, outputs, otaxon.id,
                    "dmg_dist_per_taxonomy")

            # and we exported correctly the damage distribution per asset,
            exports = helpers.prepare_cli_output(
                subprocess.check_output(["bin/openquake", "--export",
                str(oasset.id), export_target_dir]))

            expected_file = os.path.join(export_target_dir,
                    "dmg-dist-asset-%s.xml" % job.id)

            self.assertEqual([expected_file], exports)

            # and per taxonomy
            exports = helpers.prepare_cli_output(
                subprocess.check_output(["bin/openquake", "--export",
                str(otaxon.id), export_target_dir]))

            expected_file = os.path.join(export_target_dir,
                    "dmg-dist-taxonomy-%s.xml" % job.id)

            self.assertEqual([expected_file], exports)

            # and total damage distribution
            exports = helpers.prepare_cli_output(
                subprocess.check_output(["bin/openquake", "--export",
                str(ototal.id), export_target_dir]))

            expected_file = os.path.join(export_target_dir,
                    "dmg-dist-total-%s.xml" % job.id)

            self.assertEqual([expected_file], exports)

            # and collapse map
            exports = helpers.prepare_cli_output(
                subprocess.check_output(["bin/openquake", "--export",
                str(omap.id), export_target_dir]))

            expected_file = os.path.join(export_target_dir,
                    "collapse-map-%s.xml" % job.id)

            self.assertEqual([expected_file], exports)
        finally:
            shutil.rmtree(export_target_dir)
Example #47
0
    def test_scenario_risk_sample_based(self):
        # This QA test is a longer-running test of the Scenario Risk
        # calculator.

        # The vulnerabiilty model has non-zero Coefficients of Variation and
        # therefore exercises the 'sample-based' path through the calculator.
        # This test is configured to produce 1000 ground motion fields at each
        # location of interest (in the test above, only 10 are produced).

        # Since we're seeding the random epsilon sampling, we can consistently
        # reproduce all result values.

        # When these values are compared to the results computed by a similar
        # config which takes the 'mean-based' path (with CoVs = 0), we expect
        # the following:
        # All of the mean values in the 'sample-based' results should be with
        # 5%, + or -, of the 'mean-based' results.
        # The standard deviation values of the 'sample-based' results should
        # simply be greater than those produced with the 'mean-based' method.

        # For comparison, mean and stddev values for the region were computed
        # with 1000 GMFs using the mean-based approach. These values (rounded
        # to 2 decimal places) are:
        mb_mean_loss = 1233.26
        mb_stddev_loss = 443.63
        # Loss map for the mean-based approach:
        mb_loss_map = [
            dict(asset='a3',
                 pos='15.48 38.25',
                 mean=200.54874638,
                 stddev=94.2302991022),
            dict(asset='a2',
                 pos='15.56 38.17',
                 mean=510.821363253,
                 stddev=259.964152622),
            dict(asset='a1',
                 pos='15.48 38.09',
                 mean=521.885458891,
                 stddev=244.825980356),
        ]

        # Given the random seed in this config file, here's what we expect to
        # get for the region:
        exp_mean_loss = 1255.09
        exp_stddev_loss = 530.00
        # Expected loss map for the sample-based approach:
        expected_loss_map = [
            dict(asset='a3',
                 pos='15.48 38.25',
                 mean=201.976066147,
                 stddev=95.1493297113),
            dict(asset='a2',
                 pos='15.56 38.17',
                 mean=516.750292227,
                 stddev=379.617139967),
            dict(asset='a1',
                 pos='15.48 38.09',
                 mean=522.856225188,
                 stddev=248.025575687),
        ]

        # Sanity check on the test data defined above, because humans suck at
        # math:
        self.assertAlmostEqual(mb_mean_loss,
                               exp_mean_loss,
                               delta=mb_mean_loss * 0.05)
        self.assertTrue(exp_stddev_loss > mb_stddev_loss)
        # ... and the loss map:
        for i, lm_node in enumerate(mb_loss_map):
            exp_lm_node = expected_loss_map[i]

            delta = lm_node['mean'] * 0.05
            self.assertAlmostEqual(lm_node['mean'],
                                   exp_lm_node['mean'],
                                   delta=delta)
            self.assertTrue(exp_lm_node['stddev'] > lm_node['stddev'])

        # Sanity checks are done. Let's do this.
        scen_cfg = helpers.demo_file(
            'scenario_risk/config_sample-based_qa.gem')
        # `check_output=True` captures the job's stdout so the aggregate
        # mean/stddev lines can be parsed below.
        result = helpers.run_job(scen_cfg, ['--output-type=xml'],
                                 check_output=True)

        job = OqJob.objects.latest('id')
        self.assertEqual('succeeded', job.status)

        expected_loss_map_file = helpers.demo_file(
            'scenario_risk/computed_output/loss-map-%s.xml' % job.id)

        self.assertTrue(os.path.exists(expected_loss_map_file))

        self._verify_loss_map(expected_loss_map_file, expected_loss_map)

        # The captured stdout is expected to contain exactly two non-empty
        # lines: the aggregate mean loss and the aggregate stddev, each
        # with the numeric value as the last whitespace-separated token.
        result = [line for line in result.split('\n') if len(line) > 0]

        self.assertEqual(2, len(result))

        actual_mean = float(result[0].split()[-1])
        actual_stddev = float(result[1].split()[-1])

        self.assertAlmostEqual(exp_mean_loss,
                               actual_mean,
                               places=self.TOTAL_LOSS_PRECISION)
        self.assertAlmostEqual(exp_stddev_loss,
                               actual_stddev,
                               places=self.TOTAL_LOSS_PRECISION)
Example #48
0
    def test_scenario_risk_sample_based(self):
        # Longer-running QA test of the Scenario Risk calculator.
        #
        # The vulnerability model has non-zero Coefficients of Variation,
        # so this run takes the 'sample-based' path and produces 1000
        # ground motion fields per location of interest. Because the
        # epsilon sampling is seeded, the results are reproducible.
        #
        # Compared to an equivalent 'mean-based' run (CoVs forced to 0),
        # every sample-based mean is expected to fall within +/-5% of its
        # mean-based counterpart, and every sample-based standard
        # deviation is expected to exceed the mean-based one.
        #
        # Reference mean/stddev for the region, computed with 1000 GMFs
        # using the mean-based approach (rounded to 2 decimal places):
        ref_mean = 1233.26
        ref_stddev = 443.63
        # Reference loss map from the mean-based approach:
        ref_loss_map = [
            dict(asset='a3', pos='15.48 38.25',
                 mean=200.54874638, stddev=94.2302991022),
            dict(asset='a2', pos='15.56 38.17',
                 mean=510.821363253, stddev=259.964152622),
            dict(asset='a1', pos='15.48 38.09',
                 mean=521.885458891, stddev=244.825980356),
        ]

        cfg = helpers.demo_file(
            'scenario_risk/config_sample-based_qa.gem')
        result = helpers.run_job(cfg, ['--output-type=xml'],
                                 check_output=True)

        job = OqJob.objects.latest('id')
        self.assertEqual('succeeded', job.status)

        loss_map_path = helpers.demo_file(
            'scenario_risk/computed_output/loss-map-%s.xml' % job.id)
        self.assertTrue(os.path.exists(loss_map_path))

        computed_map = helpers.loss_map_result_from_file(loss_map_path)
        self._verify_loss_map_within_range(sorted(ref_loss_map),
                                           sorted(computed_map), 0.05)

        mean, stddev = helpers.mean_stddev_from_result_line(result)
        self.assertAlmostEqual(ref_mean, mean, delta=ref_mean * 0.05)
        self.assertTrue(stddev > ref_stddev)
Example #49
0
 def _do_test(self, cfg):
     """Run the job defined by `cfg` and assert that the most recent
     OqJob record reports a 'succeeded' status."""
     helpers.run_job(cfg)
     job = models.OqJob.objects.latest('id')
     self.assertEqual('succeeded', job.status)
Example #50
0
 def _run_job(self, config):
     """Run the job defined by `config` with XML output and assert a
     clean (0) exit code."""
     ret_code = helpers.run_job(config, ['--output-type=xml'])
     # `assertEquals` is a deprecated alias; use `assertEqual`.
     self.assertEqual(0, ret_code)
 def _run_job(self, config):
     """Run the job defined by `config` with XML output and assert a
     clean (0) exit code."""
     ret_code = helpers.run_job(config, ["--output-type=xml"])
     # `assertEquals` is a deprecated alias; use `assertEqual`.
     self.assertEqual(0, ret_code)