Example #1
    def test_bcr_risk_export(self):
        # Tests that the outputs of a classical BCR risk calculation
        # are exported

        target_dir = tempfile.mkdtemp()

        try:
            cfg = helpers.demo_file('classical_bcr/job.ini')

            # run the calculation to create something to export
            retcode = helpers.run_risk_job_sp(cfg, self.hazard_id,
                                              silence=True)
            self.assertEqual(0, retcode)

            job = models.OqJob.objects.latest('id')

            outputs = export_core.get_outputs(job.id)
            expected_outputs = 1  # 1 bcr distribution
            self.assertEqual(expected_outputs, len(outputs))

            # Export the BCR distribution:
            distribution = outputs.filter(output_type='bcr_distribution')[0]
            rc_files = risk.export(distribution.id, target_dir)

            self.assertEqual(1, len(rc_files))

            for f in rc_files:
                self._test_exported_file(f)
        finally:
            shutil.rmtree(target_dir)
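Every example below calls self._test_exported_file(f) on each exported path, but that helper is not shown in these snippets. The following is only a minimal sketch of what such a check could look like, assuming the exporters write non-empty XML (NRML) files to disk; the os and ElementTree assertions are illustrative, not the engine's actual implementation:

    # module-level imports assumed:  import os
    #                                from xml.etree import ElementTree as etree
    def _test_exported_file(self, filename):
        # Hypothetical sketch of the unshown helper: the exported file
        # should exist, be non-empty and, assuming an XML/NRML export,
        # parse without errors.
        self.assertTrue(os.path.exists(filename))
        self.assertTrue(os.path.getsize(filename) > 0)
        etree.parse(filename)  # raises ParseError on malformed XML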
Example #2
    def run_risk(self, cfg, hazard_id):
        """
        Given the path to a job config file, run the job and assert that it was
        successful. If this assertion passes, return the completed job.

        :param str cfg:
            Path to a job config file.
        :param int hazard_id:
            ID of the hazard output used by the risk calculation.
        :returns:
            The completed :class:`~openquake.engine.db.models.OqJob`.
        :raises:
            :exc:`AssertionError` if the job was not successfully run.
        """
        job_status = helpers.run_risk_job_sp(cfg, hazard_id, silence=True)
        self.assertEqual(0, job_status)

        completed_job = models.OqJob.objects.latest('last_update')
        self.assertEqual('complete', completed_job.status)
        return completed_job
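The docstring states the contract: pass a config path and a hazard output ID, and either get back the completed OqJob or fail with an AssertionError. A hypothetical test using this helper might look like the sketch below; the config path and the self.hazard_id fixture are assumptions taken from the other examples:

    def test_classical_risk_runs(self):
        # Hypothetical usage of run_risk; the config path and the
        # self.hazard_id fixture are assumed from the other examples.
        cfg = helpers.demo_file('classical_psha_based_risk/job.ini')
        job = self.run_risk(cfg, self.hazard_id)

        # the returned OqJob is complete, so its outputs can be inspected
        outputs = export_core.get_outputs(job.id)
        self.assertTrue(len(outputs) > 0)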
Example #3
    def test_classical_risk_export(self):
        # Tests that the outputs of a classical risk calculation are exported
        target_dir = tempfile.mkdtemp()

        try:
            cfg = helpers.demo_file('classical_psha_based_risk/job.ini')

            # run the calculation to create something to export
            retcode = helpers.run_risk_job_sp(cfg, self.hazard_id,
                                              silence=True)
            self.assertEqual(0, retcode)

            job = models.OqJob.objects.latest('id')

            outputs = export_core.get_outputs(job.id)
            expected_outputs = 4  # 1 loss curve + 3 loss maps
            self.assertEqual(expected_outputs, len(outputs))

            # Export the loss curves:
            curves = outputs.filter(output_type='loss_curve')
            rc_files = []
            for curve in curves:
                rc_files.extend(risk.export(curve.id, target_dir))

            self.assertEqual(1, len(rc_files))

            for f in rc_files:
                self._test_exported_file(f)

            # Test loss map export as well.
            maps = outputs.filter(output_type='loss_map')
            lm_files = sum(
                [risk.export(loss_map.id, target_dir)
                 for loss_map in maps], [])

            self.assertEqual(3, len(lm_files))

            for f in lm_files:
                self._test_exported_file(f)
        finally:
            shutil.rmtree(target_dir)
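The sum([...], []) calls in this and the next example flatten a list of per-output file lists into one flat list. An equivalent that avoids repeated list concatenation, shown here only as an alternative and not as the original code, is itertools.chain.from_iterable:

            # Alternative flattening (illustrative only), assuming
            # `from itertools import chain` at module level:
            lm_files = list(chain.from_iterable(
                risk.export(loss_map.id, target_dir) for loss_map in maps))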
Example #4
    def test_event_based_risk_export(self):
        # Tests that the outputs of an event-based risk calculation are exported
        target_dir = tempfile.mkdtemp()

        try:
            # use get_risk_job to create a fake GmfCollection
            job, _ = helpers.get_risk_job('event_based_risk/job.ini',
                                          'event_based_hazard/job.ini',
                                          'gmf')

            cfg = helpers.demo_file('event_based_risk/job.ini')

            # run the calculation to create something to export

            # at the moment, only GMFs for a specific realization are
            # supported as hazard input
            retcode = helpers.run_risk_job_sp(
                cfg, silence=True,
                hazard_id=job.risk_calculation.hazard_output.id)
            self.assertEqual(0, retcode)

            job = models.OqJob.objects.latest('id')

            outputs = export_core.get_outputs(job.id)
            # 1 loss curve + 3 loss maps + 1 insured + 1 aggregate loss curve
            expected_outputs = 6
            self.assertEqual(expected_outputs, len(outputs))

            # Export the loss curves...
            curves = outputs.filter(output_type='loss_curve')
            rc_files = []
            for curve in curves:
                rc_files.extend(risk.export(curve.id, target_dir))

            self.assertEqual(1, len(rc_files))

            for f in rc_files:
                self._test_exported_file(f)

            # ... loss map ...
            maps = outputs.filter(output_type='loss_map')
            lm_files = sum(
                [risk.export(loss_map.id, target_dir)
                 for loss_map in maps], [])

            self.assertEqual(3, len(lm_files))

            for f in lm_files:
                self._test_exported_file(f)

            # ... aggregate losses ...
            agg_curves = outputs.filter(output_type='agg_loss_curve')
            agg_files = sum(
                [risk.export(curve.id, target_dir)
                 for curve in agg_curves], [])

            self.assertEqual(1, len(agg_files))

            for f in agg_files:
                self._test_exported_file(f)

            # ... and insured losses.
            ins_curves = outputs.filter(output_type='ins_loss_curve')
            ins_files = sum(
                [risk.export(curve.id, target_dir)
                 for curve in ins_curves], [])

            self.assertEqual(1, len(ins_files))

            for f in ins_files:
                self._test_exported_file(f)

        finally:
            shutil.rmtree(target_dir)
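Examples #3 and #4 repeat the same filter-export-check pattern for each output type. One way to factor that repetition out is a small helper like the sketch below; the name export_and_check is an assumption for illustration, not part of the engine's test suite:

    def export_and_check(self, outputs, output_type, expected_count,
                         target_dir):
        # Hypothetical helper factoring out the repeated pattern above:
        # filter the outputs by type, export each one, flatten the
        # resulting file lists, and sanity-check every exported file.
        files = sum(
            [risk.export(output.id, target_dir)
             for output in outputs.filter(output_type=output_type)], [])

        self.assertEqual(expected_count, len(files))
        for f in files:
            self._test_exported_file(f)
        return files

With such a helper, the loss map block in Example #4 would reduce to a single call like self.export_and_check(outputs, 'loss_map', 3, target_dir).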