Example #1
0
    def test_bcr_risk_export(self):
        # Verify that the output of a classical BCR risk calculation
        # can be exported.
        export_dir = tempfile.mkdtemp()

        try:
            cfg = helpers.demo_file('classical_bcr/job.ini')

            # Run the calculation so there is something to export.
            retcode = helpers.run_risk_job_sp(cfg, self.hazard_id,
                                              silence=True)
            self.assertEqual(0, retcode)

            job = models.OqJob.objects.latest('id')

            outputs = export_core.get_outputs(job.id)
            # Exactly one output is expected: the BCR distribution.
            self.assertEqual(1, len(outputs))

            # Export the BCR distribution and check the resulting files:
            distribution = outputs.filter(output_type='bcr_distribution')[0]
            exported = risk.export(distribution.id, export_dir)

            self.assertEqual(1, len(exported))

            for path in exported:
                self._test_exported_file(path)
        finally:
            shutil.rmtree(export_dir)
Example #2
0
    def test_classical_risk_export(self):
        # Verify that the outputs of a classical risk calculation
        # (loss curves and loss maps) can be exported.
        export_dir = tempfile.mkdtemp()

        try:
            cfg = helpers.demo_file('classical_psha_based_risk/job.ini')

            # Run the calculation so there is something to export.
            retcode = helpers.run_risk_job_sp(cfg, self.hazard_id,
                                              silence=True)
            self.assertEqual(0, retcode)

            job = models.OqJob.objects.latest('id')

            outputs = export_core.get_outputs(job.id)
            # 1 loss curve set + 3 loss curve map sets are expected.
            self.assertEqual(4, len(outputs))

            # Export the loss curves and check the resulting files:
            curve_files = sum(
                [risk.export(curve.id, export_dir)
                 for curve in outputs.filter(output_type='loss_curve')], [])

            self.assertEqual(1, len(curve_files))

            for path in curve_files:
                self._test_exported_file(path)

            # Test loss map export as well.
            map_files = []
            for loss_map in outputs.filter(output_type='loss_map'):
                map_files.extend(risk.export(loss_map.id, export_dir))

            self.assertEqual(3, len(map_files))

            for path in map_files:
                self._test_exported_file(path)
        finally:
            shutil.rmtree(export_dir)
Example #3
0
    def test_event_based_risk_export(self):
        # End-to-end check: run an event-based hazard calculation, run
        # the risk calculation on its outputs, then smoke-test the
        # exporters on every risk output.
        export_dir = tempfile.mkdtemp()
        try:
            haz_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_haz_event_based.ini'
            )
            risk_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_risk_event_based.ini'
            )

            haz_job = helpers.run_hazard_job(haz_cfg)
            # Run the risk on all outputs produced by the hazard calc:
            risk_job = helpers.run_risk_job(
                risk_cfg, hazard_calculation_id=haz_job.hazard_calculation.id
            )

            risk_outputs = models.Output.objects.filter(oq_job=risk_job)

            agg_loss_curve_outputs = risk_outputs.filter(
                output_type='agg_loss_curve')
            loss_curve_outputs = risk_outputs.filter(output_type='loss_curve')
            loss_map_outputs = risk_outputs.filter(output_type='loss_map')

            # (1 mean + 2 quantiles) * 2 (as there are also insured curves)
            self.assertEqual(6, loss_curve_outputs.count())

            # 16 rlzs + 16 (due to insured curves)
            event_loss_curve_outputs = risk_outputs.filter(
                output_type='event_loss_curve')
            self.assertEqual(32, event_loss_curve_outputs.count())
            self.assertEqual(16, agg_loss_curve_outputs.count())

            # Make sure the mean and quantile curve sets got created
            # correctly.
            loss_curves = models.LossCurve.objects.filter(
                output__oq_job=risk_job
            )
            # Sanity check (16 aggregate loss curves + 38 loss curves):
            self.assertEqual(54, loss_curves.count())
            # mean
            self.assertEqual(2, loss_curves.filter(statistics='mean').count())
            # quantiles
            self.assertEqual(
                4, loss_curves.filter(statistics='quantile').count()
            )

            # 16 logic tree realizations = 16 loss maps + 1 mean loss
            # map + 2 quantile loss maps
            self.assertEqual(19, loss_map_outputs.count())

            # 16 event loss tables (1 per rlz)
            event_loss_tables = risk_outputs.filter(output_type="event_loss")
            self.assertEqual(16, event_loss_tables.count())

            # 32 loss fractions
            loss_fraction_outputs = risk_outputs.filter(
                output_type="loss_fraction")
            self.assertEqual(32, loss_fraction_outputs.count())

            # Now try to export everything, just to do a "smoketest" of
            # the exporter code:
            loss_curve_files = [
                risk.export(o.id, export_dir, 'xml')
                for o in loss_curve_outputs]
            loss_curve_files += [
                risk.export(o.id, export_dir, 'xml')
                for o in event_loss_curve_outputs]

            agg_loss_curve_files = [
                risk.export(o.id, export_dir, 'xml')
                for o in agg_loss_curve_outputs]

            event_loss_table_files = [
                risk.export(o.id, export_dir, 'xml')
                for o in event_loss_tables]

            loss_map_files = [
                risk.export(o.id, export_dir, 'xml')
                for o in loss_map_outputs]

            self.assertEqual(38, len(loss_curve_files))
            self.assertEqual(16, len(agg_loss_curve_files))
            self.assertEqual(16, len(event_loss_table_files))
            self.assertEqual(19, len(loss_map_files))

            for path in loss_curve_files + loss_map_files:
                self._test_exported_file(path)
        finally:
            shutil.rmtree(export_dir)
Example #4
0
    def test_classical_risk_export(self):
        # End-to-end check: run a classical hazard calculation, run the
        # classical risk calculation on its outputs, then smoke-test
        # the exporters on the loss curve and loss map outputs.
        export_dir = tempfile.mkdtemp()
        try:
            haz_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_haz_classical.ini'
            )
            risk_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_risk_classical.ini'
            )

            haz_job = helpers.run_hazard_job(haz_cfg)
            # Run the risk on all outputs produced by the hazard calc:
            risk_job = helpers.run_risk_job(
                risk_cfg, hazard_calculation_id=haz_job.hazard_calculation.id
            )

            risk_outputs = models.Output.objects.filter(oq_job=risk_job)

            loss_curve_outputs = risk_outputs.filter(output_type='loss_curve')
            loss_map_outputs = risk_outputs.filter(output_type='loss_map')

            # 16 logic tree realizations + 1 mean + 2 quantiles = 19,
            # doubled because there are 19 insured loss curves as well.
            self.assertEqual(38, loss_curve_outputs.count())

            # Make sure the mean and quantile curve sets got created
            # correctly, for both plain and insured curves.
            loss_curves = models.LossCurve.objects.filter(
                output__oq_job=risk_job,
                insured=False
            )
            # sanity check
            self.assertEqual(19, loss_curves.count())

            insured_curves = models.LossCurve.objects.filter(
                output__oq_job=risk_job,
                insured=True
            )
            # sanity check
            self.assertEqual(19, insured_curves.count())

            for curve_set in (loss_curves, insured_curves):
                # 1 mean curve per set ...
                self.assertEqual(
                    1, curve_set.filter(statistics='mean').count())
                # ... and 2 quantile curves per set.
                self.assertEqual(
                    2, curve_set.filter(statistics='quantile').count())

            # 16 logic tree realizations = 16 loss maps + 1 mean loss
            # map + 2 quantile loss maps
            self.assertEqual(19, loss_map_outputs.count())

            # 19 loss fractions
            loss_fraction_outputs = risk_outputs.filter(
                output_type="loss_fraction")
            self.assertEqual(19, loss_fraction_outputs.count())

            # Now try to export everything, just to do a "smoketest" of
            # the exporter code:
            loss_curve_files = [
                risk.export(o.id, export_dir, 'xml')
                for o in loss_curve_outputs]

            loss_map_files = [
                risk.export(o.id, export_dir, 'xml')
                for o in loss_map_outputs]

            self.assertEqual(38, len(loss_curve_files))
            self.assertEqual(19, len(loss_map_files))

            for path in loss_curve_files + loss_map_files:
                self._test_exported_file(path)
        finally:
            shutil.rmtree(export_dir)
Example #5
0
    def test_event_based_risk_export(self):
        # Verify that the outputs of an event-based risk calculation
        # (loss curves, loss maps, aggregate and insured loss curves)
        # can be exported.
        export_dir = tempfile.mkdtemp()

        try:
            # Use get_risk_job to create a fake GmfCollection.
            job, _ = helpers.get_risk_job('event_based_risk/job.ini',
                                          'event_based_hazard/job.ini',
                                          'gmf')

            cfg = helpers.demo_file('event_based_risk/job.ini')

            # Run the calculation so there is something to export.
            # At the moment, only gmf for a specific realization are
            # supported as hazard input.
            retcode = helpers.run_risk_job_sp(
                cfg, silence=True,
                hazard_id=job.risk_calculation.hazard_output.id)
            self.assertEqual(0, retcode)

            job = models.OqJob.objects.latest('id')

            outputs = export_core.get_outputs(job.id)
            # 1 loss curve set + 3 loss map sets + 1 insured + 1 aggregate
            self.assertEqual(6, len(outputs))

            # Export the loss curves ...
            curve_files = []
            for curve in outputs.filter(output_type='loss_curve'):
                curve_files.extend(risk.export(curve.id, export_dir))

            self.assertEqual(1, len(curve_files))

            for path in curve_files:
                self._test_exported_file(path)

            # ... the loss maps ...
            map_files = sum(
                [risk.export(loss_map.id, export_dir)
                 for loss_map in outputs.filter(output_type='loss_map')], [])

            self.assertEqual(3, len(map_files))

            for path in map_files:
                self._test_exported_file(path)

            # ... the aggregate loss curves ...
            agg_files = sum(
                [risk.export(curve.id, export_dir)
                 for curve in outputs.filter(output_type='agg_loss_curve')],
                [])

            self.assertEqual(1, len(agg_files))

            for path in agg_files:
                self._test_exported_file(path)

            # ... and the insured loss curves.
            ins_files = sum(
                [risk.export(curve.id, export_dir)
                 for curve in outputs.filter(output_type='ins_loss_curve')],
                [])

            self.assertEqual(1, len(ins_files))

            for path in ins_files:
                self._test_exported_file(path)

        finally:
            shutil.rmtree(export_dir)