Ejemplo n.º 1
0
 def setUp(self):
     """Create a fake classical risk job and mark it as running."""
     risk_ini = get_data_path('classical_psha_based_risk/job.ini')
     hazard_ini = get_data_path('simple_fault_demo_hazard/job.ini')
     self.job, _ = helpers.get_fake_risk_job(risk_ini, hazard_ini)
     # a JobStats row is required by the engine machinery
     models.JobStats.objects.create(oq_job=self.job)
     self.job.is_running = True
     self.job.save()
Ejemplo n.º 2
0
    def setUp(self):
        """Build a fake classical BCR risk job and its calculator."""
        risk_cfg = get_data_path('classical_bcr/job.ini')
        haz_cfg = get_data_path('simple_fault_demo_hazard/job.ini')
        self.job, _ = helpers.get_fake_risk_job(risk_cfg, haz_cfg)
        self.calculator = classical_bcr.ClassicalBCRRiskCalculator(self.job)
        # the engine expects a JobStats record for every job
        models.JobStats.objects.create(oq_job=self.job)
Ejemplo n.º 3
0
    def setUp(self):
        """Build a fake event-based BCR risk job (GMF output) plus its
        calculator."""
        risk_cfg = get_data_path('event_based_bcr/job.ini')
        haz_cfg = get_data_path('event_based_hazard/job.ini')
        self.job, _ = helpers.get_fake_risk_job(
            risk_cfg, haz_cfg, output_type="gmf")
        self.calculator = core.EventBasedBCRRiskCalculator(self.job)
        models.JobStats.objects.create(oq_job=self.job)
Ejemplo n.º 4
0
    def test_job_from_file(self):
        """Risk jobs built from file must warn when reusing another
        user's hazard and when quantile statistics are requested with a
        single hazard output, and must link back to the hazard job."""
        # make a hazard job
        haz_cfg = helpers.get_data_path('event_based_hazard/job.ini')
        haz_job = engine.job_from_file(haz_cfg, 'test_user')

        # make a fake Output
        out = models.Output.objects.create(
            oq_job=haz_job, display_name='fake', output_type='gmf')

        # make a risk job
        risk_cfg = helpers.get_data_path('event_based_risk/job.ini')
        with mock.patch.object(logs.LOG, 'warn') as warn:
            risk_job = engine.job_from_file(
                risk_cfg, 'another_user', hazard_output_id=out.id)

        # make sure a warning is printed because you are using a hazard
        # generated by a different user
        self.assertEqual(warn.call_args[0],
                         ('You are using a hazard calculation ran by %s',
                          'test_user'))

        with mock.patch.object(logs.LOG, 'warn') as warn:
            risk_job = engine.job_from_file(
                risk_cfg, 'test_user', hazard_output_id=out.id,
                quantile_loss_curves='0.1 0.2')

        # make sure a warning is printed because you are using
        # quantile_loss_curves with a single hazard output
        self.assertEqual(
            warn.call_args[0][0],
            'quantile_loss_curves is on, but you passed a single hazard '
            'output: the statistics will not be computed')

        # make sure the hazard job is associated correctly
        self.assertEqual(risk_job.hazard_calculation.id, haz_job.id)
Ejemplo n.º 5
0
 def setUp(self):
     """Prepare a running fake classical risk job for the tests."""
     self.job, _ = helpers.get_fake_risk_job(
         get_data_path('classical_psha_based_risk/job.ini'),
         get_data_path('simple_fault_demo_hazard/job.ini'))
     models.JobStats.objects.create(oq_job=self.job)
     # flag the job as started, as the engine would
     self.job.is_running = True
     self.job.save()
Ejemplo n.º 6
0
    def setUp(self):
        """Prepare an executing event-based risk job whose pre_execute
        phase has already run."""
        risk_cfg = get_data_path('event_based_risk/job.ini')
        haz_cfg = get_data_path('event_based_hazard/job.ini')
        self.job, _ = helpers.get_fake_risk_job(
            risk_cfg, haz_cfg, output_type="gmf")
        self.calculator = event_based.EventBasedRiskCalculator(self.job)
        models.JobStats.objects.create(oq_job=self.job)
        self.calculator.pre_execute()
        # mimic a job the engine has already started
        self.job.is_running = True
        self.job.status = 'executing'
        self.job.save()
Ejemplo n.º 7
0
 def setUp(self):
     """Collect the compulsory and optional risk-calculation arguments
     used by the tests."""
     job, _ = helpers.get_fake_risk_job(
         get_data_path('classical_psha_based_risk/job.ini'),
         get_data_path('simple_fault_demo_hazard/job.ini'))
     self.compulsory_arguments = dict(lrem_steps_per_interval=5)
     region_wkt = ('POLYGON((-122.0 38.113, -122.114 38.113, '
                   '-122.57 38.111, -122.0 38.113))')
     self.other_args = dict(
         calculation_mode="classical",
         region_constraint=region_wkt,
         hazard_output=job.risk_calculation.hazard_output)
Ejemplo n.º 8
0
    def setUp(self):
        """Set up a scenario risk job, mark it running and pre-execute
        its calculator."""
        risk_cfg = get_data_path('scenario_risk/job.ini')
        haz_cfg = get_data_path('scenario_hazard/job.ini')
        self.job, _ = helpers.get_fake_risk_job(
            risk_cfg, haz_cfg, output_type="gmf_scenario")
        self.calculator = scenario.ScenarioRiskCalculator(self.job)
        models.JobStats.objects.create(oq_job=self.job)
        self.job.is_running = True
        self.job.save()
        # pre_execute needs a saved, running job
        self.calculator.pre_execute()
        self.job.status = 'executing'
        self.job.save()
Ejemplo n.º 9
0
    def test_export_for_scenario(self):
        """Run a scenario hazard calculation end-to-end and check that
        its single GMF output exports to a valid, non-empty NRML file
        containing 20 GMFs."""
        target_dir = tempfile.mkdtemp()

        try:
            cfg = helpers.get_data_path("scenario_hazard/job.ini")

            # run the calculation in process to create something to export
            os.environ["OQ_NO_DISTRIBUTE"] = "1"
            try:
                helpers.run_job(cfg)
            finally:
                del os.environ["OQ_NO_DISTRIBUTE"]
            job = models.OqJob.objects.latest("id")
            self.assertEqual(job.status, "complete")

            outputs = export_core.get_outputs(job.id)

            self.assertEqual(1, len(outputs))  # 1 GMF

            gmf_outputs = outputs.filter(output_type="gmf_scenario")
            self.assertEqual(1, len(gmf_outputs))

            exported_file = check_export(gmf_outputs[0].id, target_dir)

            # Check the file paths exist, is absolute, and the file isn't
            # empty.
            self._test_exported_file(exported_file)

            # Check for the correct number of GMFs in the file:
            tree = etree.parse(exported_file)
            self.assertEqual(20, number_of("nrml:gmf", tree))
        finally:
            shutil.rmtree(target_dir)
Ejemplo n.º 10
0
 def setUpClass(cls):
     """Create two logic-tree realizations, each with its own SES
     collection and GMF records, and store the GMFs, rupture tags and
     investigation time on the class for the tests."""
     cfg = helpers.get_data_path('event_based_hazard/job.ini')
     job = helpers.get_hazard_job(cfg)
     rlz1 = models.LtRealization.objects.create(
         hazard_calculation=job.hazard_calculation,
         ordinal=1, seed=1, weight=None,
         sm_lt_path="test_sm", gsim_lt_path="test_gsim")
     rlz2 = models.LtRealization.objects.create(
         hazard_calculation=job.hazard_calculation,
         ordinal=2, seed=1, weight=None,
         sm_lt_path="test_sm", gsim_lt_path="test_gsim_2")
     ses_coll1 = models.SESCollection.objects.create(
         output=models.Output.objects.create_output(
             job, "Test SES Collection 1", "ses"),
         lt_realization=rlz1)
     ses_coll2 = models.SESCollection.objects.create(
         output=models.Output.objects.create_output(
             job, "Test SES Collection 2", "ses"),
         lt_realization=rlz2)
     gmf_data1 = helpers.create_gmf_data_records(job, rlz1, ses_coll1)[0]
     # explicit locations for the second realization's GMF records
     points = [(15.3, 38.22), (15.7, 37.22),
               (15.4, 38.09), (15.56, 38.1), (15.2, 38.2)]
     gmf_data2 = helpers.create_gmf_data_records(
         job, rlz2, ses_coll2, points)[0]
     cls.gmf_coll1 = gmf_data1.gmf
     cls.ruptures1 = tuple(get_tags(gmf_data1))
     cls.ruptures2 = tuple(get_tags(gmf_data2))
     cls.investigation_time = job.hazard_calculation.investigation_time
Ejemplo n.º 11
0
    def setUp(self):
        """Create a fake classical risk job, pre-execute it and add two
        exposure assets (one near, one far from the region)."""
        self.job, _ = helpers.get_fake_risk_job(
            get_data_path("classical_psha_based_risk/job.ini"),
            get_data_path("simple_fault_demo_hazard/job.ini"))
        calculator = base.RiskCalculator(self.job)
        models.JobStats.objects.create(oq_job=self.job)
        calculator.pre_execute()
        self.rc = self.job.risk_calculation

        common_fake_args = dict(
            exposure_model=self.rc.exposure_model, taxonomy="test")

        models.ExposureData(
            site=Point(0.5, 0.5), asset_ref="test1",
            **common_fake_args).save()
        models.ExposureData(
            site=Point(179.1, 0), asset_ref="test2",
            **common_fake_args).save()
Ejemplo n.º 12
0
    def get_hazard_job(self):
        """Create and return a hazard job with one site at POINT(1 1)
        and a mean PGA hazard curve built from the hard-coded
        imls/poes below."""
        hazard_imls = [
            0.001, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5,
            0.55, 0.6, 0.7, 0.8, 0.9, 1.0
        ]
        poes = [
            0.039861266979, 0.039861266979, 0.0397287574803, 0.0296134266256,
            0.0198273287565, 0.0130622701615, 0.00865538795, 0.00589852059369,
            0.00406169858951, 0.00281172717953, 0.00199511741778,
            0.00135870597285, 0.000989667841574, 0.000757544444296,
            0.000272824002046, 0.0, 0.0, 0.
        ]
        # the job config is overridden with the PGA levels above
        job = helpers.get_job(
            helpers.get_data_path("simple_fault_demo_hazard/job.ini"),
            intensity_measure_types_and_levels=str({'PGA': hazard_imls}))

        models.HazardSite.objects.create(hazard_calculation=job,
                                         location="POINT(1 1)")
        models.HazardCurveData.objects.create(
            hazard_curve=models.HazardCurve.objects.create(
                output=models.Output.objects.create_output(
                    job, "Test Hazard curve", "hazard_curve"),
                investigation_time=50,
                imt="PGA",
                imls=hazard_imls,
                statistics="mean"),
            poes=poes,
            location="POINT(1 1)")

        return job
Ejemplo n.º 13
0
    def get_hazard_job(self):
        """Create and return a hazard job holding a single mean PGA
        hazard curve at POINT(1 1), built from hard-coded (iml, poe)
        pairs."""
        job = helpers.get_hazard_job(
            helpers.get_data_path("simple_fault_demo_hazard/job.ini"))

        # (iml, poe) pairs for the synthetic mean curve
        hazard_curve = [
            (0.001, 0.0398612669790014),
            (0.01, 0.039861266979001400), (0.05, 0.039728757480298900),
            (0.10, 0.029613426625612500), (0.15, 0.019827328756491600),
            (0.20, 0.013062270161451900), (0.25, 0.008655387950000430),
            (0.30, 0.005898520593689670), (0.35, 0.004061698589511780),
            (0.40, 0.002811727179526820), (0.45, 0.001995117417776690),
            (0.50, 0.001358705972845710), (0.55, 0.000989667841573727),
            (0.60, 0.000757544444296432), (0.70, 0.000272824002045979),
            (0.80, 0.00), (0.9, 0.00), (1.0, 0.00)]

        models.HazardCurveData.objects.create(
            hazard_curve=models.HazardCurve.objects.create(
                output=models.Output.objects.create_output(
                    job, "Test Hazard curve", "hazard_curve"),
                investigation_time=50,
                imt="PGA", imls=[hz[0] for hz in hazard_curve],
                statistics="mean"),
            poes=[hz[1] for hz in hazard_curve],
            location="POINT(1 1)")

        return job
Ejemplo n.º 14
0
    def test_export_for_scenario(self):
        """Run a scenario hazard calculation and verify its 'gmfs'
        output exports to a non-empty NRML file with 20 GMFs."""
        target_dir = tempfile.mkdtemp()

        try:
            cfg = helpers.get_data_path('scenario_hazard/job.ini')
            # run the calculation in process to create something to export
            with mock.patch.dict(os.environ, {'OQ_NO_DISTRIBUTE': '1'}):
                helpers.run_job(cfg)
            job = models.OqJob.objects.latest('id')
            self.assertEqual(job.status, 'complete')

            outputs = core.get_outputs(job.id)

            gmf_outputs = outputs.filter(ds_key='gmfs')
            self.assertEqual(1, len(gmf_outputs))

            exported_file = check_export(gmf_outputs[0].id, target_dir)

            # Check the file paths exist, is absolute, and the file isn't
            # empty.
            self._test_exported_file(exported_file)

            # Check for the correct number of GMFs in the file:
            tree = etree.parse(exported_file)
            self.assertEqual(20, number_of('nrml:gmf', tree))
        finally:
            shutil.rmtree(target_dir)
Ejemplo n.º 15
0
    def test(self):
        """When risk models are provided, the points to compute and the
        imls must be derived from them."""
        # check that if risk models are provided, then the ``points to
        # compute`` and the imls are got from there

        username = helpers.default_user()

        job = engine.prepare_job(username)

        cfg = helpers.get_data_path("classical_job-sd-imt.ini")
        params = vars(readini.parse_config(open(cfg)))
        # remove the risk->hazard linking parameters from the config
        del params["hazard_calculation_id"]
        del params["hazard_output_id"]
        haz_calc = engine.create_calculation(models.HazardCalculation, params)
        # re-fetch from the db to work on a fresh instance
        haz_calc = models.HazardCalculation.objects.get(id=haz_calc.id)
        job.hazard_calculation = haz_calc
        job.is_running = True
        job.save()

        calc = get_calculator_class("hazard", job.hazard_calculation.calculation_mode)(job)
        calc.parse_risk_models()

        # sites come from the risk model, not from the region
        self.assertEqual(
            [(1.0, -1.0), (0.0, 0.0)], [(point.latitude, point.longitude) for point in haz_calc.points_to_compute()]
        )
        self.assertEqual(["PGA"], haz_calc.get_imts())

        self.assertEqual(3, haz_calc.oqjob.exposuremodel.exposuredata_set.count())

        return job
Ejemplo n.º 16
0
 def get_hazard_job(self):
     """Return a scenario hazard job populated with GMFs from CSV."""
     cfg = helpers.get_data_path("scenario_hazard/job.ini")
     job = helpers.get_job(cfg, number_of_ground_motion_fields=1000)
     helpers.create_gmf_from_csv(
         job, self._test_path('gmf_scenario.csv'), 'gmf_scenario')
     return job
Ejemplo n.º 17
0
    def test_job_from_file(self):
        """A risk job built on a hazard output must be linked to the
        hazard job that produced that output."""
        # build the hazard job and attach a fake GMF output to it
        haz_job = engine.job_from_file(
            helpers.get_data_path('event_based_hazard/job.ini'), 'test_user')
        out = models.Output.objects.create(
            oq_job=haz_job, display_name='fake', output_type='gmf')

        # build the risk job on top of that output
        risk_job = engine.job_from_file(
            helpers.get_data_path('event_based_risk/job.ini'),
            'test_user', hazard_output_id=out.id)

        # the risk job must point back at the hazard job
        oqjob = risk_job.risk_calculation.hazard_calculation
        self.assertEqual(oqjob.id, haz_job.id)
Ejemplo n.º 18
0
    def setUpClass(self):
        """Create a SES collection plus one fault-based and one
        source-based probabilistic rupture for use by the tests.

        NOTE(review): the first parameter of ``setUpClass`` is the class
        object despite being named ``self`` here — confirm a
        ``@classmethod`` decorator exists just above this method.
        """
        cfg = helpers.get_data_path('simple_fault_demo_hazard/job.ini')
        job = helpers.get_job(cfg)
        output = models.Output.objects.create(
            oq_job=job, display_name='test', output_type='ses')
        ses_coll = models.SESCollection.create(
            output=output)
        # 4x4 meshes of lons/lats/depths for the simple fault surface
        self.mesh_lons = numpy.array(
            [0.1 * x for x in range(16)]).reshape((4, 4))
        self.mesh_lats = numpy.array(
            [0.2 * x for x in range(16)]).reshape((4, 4))
        self.mesh_depths = numpy.array(
            [0.3 * x for x in range(16)]).reshape((4, 4))

        sfs = SimpleFaultSurface(
            Mesh(self.mesh_lons, self.mesh_lats, self.mesh_depths))

        ps = PlanarSurface(
            10, 20, 30,
            Point(3.9, 2.2, 10), Point(4.90402718, 3.19634248, 10),
            Point(5.9, 2.2, 90), Point(4.89746275, 1.20365263, 90))
        self.fault_rupture = models.ProbabilisticRupture.objects.create(
            ses_collection=ses_coll, magnitude=5, rake=0, surface=sfs,
            is_from_fault_source=True, is_multi_surface=False)
        self.source_rupture = models.ProbabilisticRupture.objects.create(
            ses_collection=ses_coll, magnitude=5, rake=0, surface=ps,
            is_from_fault_source=False, is_multi_surface=False)
Ejemplo n.º 19
0
 def get_hazard_job(self):
     """Return a scenario hazard job with GMFs loaded from case_1's
     gmf_scenario.csv."""
     job = helpers.get_job(
         helpers.get_data_path("scenario_hazard/job.ini"))
     csv_dir = os.path.dirname(case_1.__file__)
     helpers.create_gmf_from_csv(
         job, os.path.join(csv_dir, 'gmf_scenario.csv'), 'gmf_scenario')
     return job
Ejemplo n.º 20
0
    def test_export_for_scenario(self):
        """Run a scenario hazard calculation (GMF + SES outputs) and
        verify the GMF output exports to a non-empty NRML file with 20
        GMFs."""
        target_dir = tempfile.mkdtemp()

        try:
            cfg = helpers.get_data_path('scenario_hazard/job.ini')
            # run the calculation in process to create something to export
            with mock.patch.dict(os.environ, {'OQ_NO_DISTRIBUTE': '1'}):
                helpers.run_job(cfg)
            job = models.OqJob.objects.latest('id')
            self.assertEqual(job.status, 'complete')

            outputs = core.get_outputs(job.id)

            self.assertEqual(2, len(outputs))  # 1 GMF, 1 SES

            gmf_outputs = outputs.filter(output_type='gmf_scenario')
            self.assertEqual(1, len(gmf_outputs))

            exported_file = check_export(gmf_outputs[0].id, target_dir)

            # Check the file paths exist, is absolute, and the file isn't
            # empty.
            self._test_exported_file(exported_file)

            # Check for the correct number of GMFs in the file:
            tree = etree.parse(exported_file)
            self.assertEqual(20, number_of('nrml:gmf', tree))
        finally:
            shutil.rmtree(target_dir)
Ejemplo n.º 21
0
    def test_validate_warns(self):
        """`validate` must warn about parameters that do not belong to
        the job's calculation mode."""
        # Test that `validate` raises warnings if unnecessary parameters are
        # specified for a given calculation.
        # For example, `ses_per_logic_tree_path` is an event-based hazard
        # param; if this param is specified for a classical hazard job, a
        # warning should be raised.
        cfg_file = helpers.get_data_path('simple_fault_demo_hazard/job.ini')
        job = engine.prepare_job()
        params = engine.parse_config(open(cfg_file, 'r'))
        # Add a few superfluous parameters:
        params['ses_per_logic_tree_path'] = 5
        params['ground_motion_correlation_model'] = 'JB2009'
        calculation = engine.create_calculation(
            models.HazardCalculation, params)
        job.hazard_calculation = calculation
        job.save()

        # collect the warnings emitted during validation
        with warnings.catch_warnings(record=True) as w:
            validation.validate(job, 'hazard', params, ['xml'])

        expected_warnings = [
            "Unknown parameter '%s' for calculation mode 'classical'."
            " Ignoring." % x for x in ('ses_per_logic_tree_path',
                                       'ground_motion_correlation_model')
        ]

        actual_warnings = [m.message.message for m in w]
        # order is not guaranteed, so compare sorted lists
        self.assertEqual(sorted(expected_warnings), sorted(actual_warnings))
Ejemplo n.º 22
0
    def test_store_site_model(self):
        """store_site_model must persist every site record and return
        the ids of the newly inserted rows, in insertion order."""
        site_model = helpers.get_data_path("site_model.xml")

        exp_site_model = [
            dict(lon=-122.5, lat=37.5, vs30=800.0, vs30_type="measured",
                 z1pt0=100.0, z2pt5=5.0),
            dict(lon=-122.6, lat=37.6, vs30=801.0, vs30_type="measured",
                 z1pt0=101.0, z2pt5=5.1),
            dict(lon=-122.7, lat=37.7, vs30=802.0, vs30_type="measured",
                 z1pt0=102.0, z2pt5=5.2),
            dict(lon=-122.8, lat=37.8, vs30=803.0, vs30_type="measured",
                 z1pt0=103.0, z2pt5=5.3),
            dict(lon=-122.9, lat=37.9, vs30=804.0, vs30_type="measured",
                 z1pt0=104.0, z2pt5=5.4),
        ]

        job = models.OqJob.objects.create(user_name="openquake")
        ids = general.store_site_model(job, site_model)

        actual_site_model = models.SiteModel.objects.filter(
            job=job).order_by("id")

        # each stored record must match the expected values
        for i, exp in enumerate(exp_site_model):
            act = actual_site_model[i]
            self.assertAlmostEqual(exp["lon"], act.location.x)
            self.assertAlmostEqual(exp["lat"], act.location.y)
            self.assertAlmostEqual(exp["vs30"], act.vs30)
            self.assertEqual(exp["vs30_type"], act.vs30_type)
            self.assertAlmostEqual(exp["z1pt0"], act.z1pt0)
            self.assertAlmostEqual(exp["z2pt5"], act.z2pt5)

        # last, check that the `store_site_model` function returns all of the
        # newly-inserted records
        for i, s in enumerate(ids):
            self.assertEqual(s, actual_site_model[i].id)
Ejemplo n.º 23
0
 def setUp(self):
     """Build an event-based hazard job with a 5-site fake site
     collection injected."""
     self.cfg = helpers.get_data_path('event_based_hazard/job_2.ini')
     self.job = helpers.get_job(self.cfg, username=getpass.getuser())
     self.calc = core.EventBasedHazardCalculator(self.job)
     # inject a synthetic site collection instead of computing one
     hazard_calc = self.job.hazard_calculation
     hazard_calc._site_collection = make_site_coll(0, 0, n=5)
     models.JobStats.objects.create(oq_job=self.job)
Ejemplo n.º 24
0
    def test_initialize_site_model(self):
        """initialize_site_model must store every site model record and
        create one HazardSite per point of interest."""
        # we need a slightly different config file for this test
        cfg = helpers.get_data_path(
            'simple_fault_demo_hazard/job_with_site_model.ini')
        self.job = helpers.get_hazard_job(cfg)
        self.calc = core.ClassicalHazardCalculator(self.job)

        self.calc.initialize_site_model()
        # If the site model isn't valid for the calculation geometry, a
        # `RuntimeError` should be raised here

        # Okay, it's all good. Now check the count of the site model records.
        sm_nodes = models.SiteModel.objects.filter(job=self.job)

        self.assertEqual(2601, len(sm_nodes))

        num_pts_to_compute = len(
            self.job.hazard_calculation.points_to_compute())

        hazard_site = models.HazardSite.objects.filter(
            hazard_calculation=self.job.hazard_calculation)

        # The site model is good. Now test that `hazard_site` was computed.
        # For now, just test the length.
        self.assertEqual(num_pts_to_compute, len(hazard_site))
Ejemplo n.º 25
0
    def get_hazard_job(self):
        """Create and return a hazard job with one site at POINT(1 1)
        and a mean PGA hazard curve from the hard-coded imls/poes."""
        hazard_imls = [0.001, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4,
                       0.45, 0.5, 0.55, 0.6, 0.7, 0.8, 0.9, 1.0]
        poes = [0.039861266979, 0.039861266979, 0.0397287574803,
                0.0296134266256, 0.0198273287565, 0.0130622701615,
                0.00865538795, 0.00589852059369, 0.00406169858951,
                0.00281172717953, 0.00199511741778, 0.00135870597285,
                0.000989667841574, 0.000757544444296, 0.000272824002046,
                0.0, 0.0, 0.]
        # the job config is overridden with the PGA levels above
        job = helpers.get_job(
            helpers.get_data_path("simple_fault_demo_hazard/job.ini"),
            intensity_measure_types_and_levels=str({'PGA': hazard_imls}))

        models.HazardSite.objects.create(
            hazard_calculation=job, location="POINT(1 1)")
        models.HazardCurveData.objects.create(
            hazard_curve=models.HazardCurve.objects.create(
                output=models.Output.objects.create_output(
                    job, "Test Hazard curve", "hazard_curve"),
                investigation_time=50,
                imt="PGA", imls=hazard_imls,
                statistics="mean"),
            poes=poes,
            location="POINT(1 1)")

        return job
Ejemplo n.º 26
0
    def setUpClass(self):
        """Create one fault-based and one source-based probabilistic
        rupture sharing a common SES collection.

        NOTE(review): ``setUpClass`` receives the class object as its
        first argument even though it is named ``self`` here — verify
        the ``@classmethod`` decorator at the definition site.
        """
        cfg = helpers.get_data_path('simple_fault_demo_hazard/job.ini')
        job = helpers.get_job(cfg)
        output = models.Output.objects.create(oq_job=job,
                                              display_name='test',
                                              output_type='ses')
        ses_coll = models.SESCollection.create(output=output)
        # 4x4 coordinate meshes used to build the fault surface
        self.mesh_lons = numpy.array([0.1 * x for x in range(16)]).reshape(
            (4, 4))
        self.mesh_lats = numpy.array([0.2 * x for x in range(16)]).reshape(
            (4, 4))
        self.mesh_depths = numpy.array([0.3 * x for x in range(16)]).reshape(
            (4, 4))

        sfs = SimpleFaultSurface(
            Mesh(self.mesh_lons, self.mesh_lats, self.mesh_depths))

        ps = PlanarSurface(10, 20, 30, Point(3.9, 2.2, 10),
                           Point(4.90402718, 3.19634248, 10),
                           Point(5.9, 2.2, 90),
                           Point(4.89746275, 1.20365263, 90))
        self.fault_rupture = models.ProbabilisticRupture.objects.create(
            ses_collection=ses_coll,
            magnitude=5,
            rake=0,
            surface=sfs,
            is_from_fault_source=True,
            is_multi_surface=False)
        self.source_rupture = models.ProbabilisticRupture.objects.create(
            ses_collection=ses_coll,
            magnitude=5,
            rake=0,
            surface=ps,
            is_from_fault_source=False,
            is_multi_surface=False)
Ejemplo n.º 27
0
 def setUpClass(cls):
     """Prepare a classical hazard calculator with its site model
     already initialized."""
     cfg = helpers.get_data_path(
         "calculators/hazard/classical/haz_map_test_job.ini")
     job = helpers.get_job(cfg)
     models.JobStats.objects.create(oq_job=job)
     hc = job.hazard_calculation
     cls.calc = get_calculator_class("hazard", hc.calculation_mode)(job)
     cls.calc.initialize_site_model()
     # the test fixture defines exactly two sites
     assert len(hc.site_collection) == 2, len(hc.site_collection)
Ejemplo n.º 28
0
 def setUp(self):
     """Create a hazard job with a random number of source models, each
     paired with one realization."""
     cfg = helpers.get_data_path("simple_fault_demo_hazard/job.ini")
     self.job = helpers.get_job(cfg, username="******")
     for i in range(0, random.randint(1, 10)):
         lt_model = models.LtSourceModel.objects.create(
             hazard_calculation=self.job.hazard_calculation,
             ordinal=i, sm_lt_path=[i])
         models.LtRealization(
             lt_model=lt_model, ordinal=i, weight=1 / (i + 1),
             gsim_lt_path=[i]).save()
Ejemplo n.º 29
0
 def setUpClass(cls):
     """Initialize a classical hazard calculator over the two-site
     test fixture."""
     job = helpers.get_hazard_job(helpers.get_data_path(
         'calculators/hazard/classical/haz_map_test_job.ini'))
     hc = job.hazard_calculation
     cls.calc = get_calculator_class('hazard', hc.calculation_mode)(job)
     cls.calc.initialize_site_model()
     n_sites = len(hc.site_collection)
     assert n_sites == 2, n_sites
Ejemplo n.º 30
0
 def setUp(self):
     """Create a hazard job plus a random number of logic-tree
     realizations attached to it."""
     self.job = helpers.get_hazard_job(
         helpers.get_data_path('simple_fault_demo_hazard/job.ini'),
         username="******")
     for ordinal in range(random.randint(1, 10)):
         models.LtRealization(
             hazard_calculation=self.job.hazard_calculation,
             ordinal=ordinal, seed=None, weight=1 / (ordinal + 1),
             sm_lt_path=[ordinal], gsim_lt_path=[ordinal]).save()
Ejemplo n.º 31
0
 def setUpClass(cls):
     """Build a calculator for the two-site classical test job and
     initialize its site collection."""
     cfg = helpers.get_data_path(
         'calculators/hazard/classical/haz_map_test_job.ini')
     job = helpers.get_job(cfg)
     models.JobStats.objects.create(oq_job=job)
     cls.calc = calculators(job)
     cls.calc.initialize_site_collection()
     site_count = len(cls.calc.site_collection)
     assert site_count == 2, site_count
Ejemplo n.º 32
0
    def get_hazard_job(self):
        """Return an event-based hazard job seeded with GMFs from
        gmf.csv."""
        job = helpers.get_job(
            helpers.get_data_path("event_based_hazard/job.ini"),
            region_grid_spacing='0', ses_per_logic_tree_path='1')
        job.save()
        helpers.create_gmf_from_csv(job, self._test_path('gmf.csv'))
        return job
Ejemplo n.º 33
0
 def setUpClass(cls):
     """Set up the classical hazard calculator used by these tests and
     load its (two-site) site collection."""
     job = helpers.get_job(helpers.get_data_path(
         'calculators/hazard/classical/haz_map_test_job.ini'))
     models.JobStats.objects.create(oq_job=job)
     cls.calc = calculators(job)
     cls.calc.initialize_site_collection()
     n = len(cls.calc.site_collection)
     assert n == 2, n
Ejemplo n.º 34
0
 def test(self):
     """With a 1 km maximum_distance no sources are found, so a
     'No realizations' warning must be emitted."""
     cfg = helpers.get_data_path('classical_job.ini')
     with mock.patch.dict(os.environ, {'OQ_NO_DISTRIBUTE': '1'}), \
             mock.patch('openquake.engine.logs.LOG.warn') as warn:
         # using a small maximum distance of 1 km, so that no sources
         # are found, and checking that no realizations are generated
         helpers.run_job(cfg, maximum_distance=1)
         self.assertEqual(warn.call_args[0][0],
                          'No realizations for hazard_calculation_id=%d')
Ejemplo n.º 35
0
 def get_hazard_job(self):
     """Return a scenario hazard job with CSV GMFs loaded and
     number_of_ground_motion_fields forced to 1000."""
     job = helpers.get_job(
         helpers.get_data_path("scenario_hazard/job.ini"))
     helpers.create_gmf_from_csv(
         job, self._test_path('gmf_scenario.csv'), 'gmf_scenario')
     # this is needed to make happy the GetterBuilder
     hc = job.hazard_calculation
     hc.number_of_ground_motion_fields = 1000
     hc.save()
     return job
Ejemplo n.º 36
0
 def test_check_limits_event_based(self):
     """Check the input/output weights computed for the event-based
     demo (2 realizations, 5 ses, 2 imt, 121 sites)."""
     job = helpers.get_job(
         helpers.get_data_path('event_based_hazard/job.ini'))
     models.JobStats.objects.create(oq_job=job)
     input_weight, output_weight = calculators(job).pre_execute()
     self.assertEqual(input_weight, 2705.5)
     self.assertAlmostEqual(output_weight, 1210.0)
Ejemplo n.º 37
0
    def get_hazard_job(self):
        """Build an event-based hazard job and load GMFs from
        gmf.csv."""
        cfg = helpers.get_data_path("event_based_hazard/job.ini")
        job = helpers.get_job(
            cfg, region_grid_spacing='0', ses_per_logic_tree_path='1')
        job.save()
        helpers.create_gmf_from_csv(job, self._test_path('gmf.csv'))
        return job
Ejemplo n.º 38
0
 def setUp(self):
     """Create a job and attach a random number of source models, each
     with a single realization."""
     self.job = helpers.get_job(
         helpers.get_data_path('simple_fault_demo_hazard/job.ini'),
         username="******")
     for i in range(random.randint(1, 10)):
         source_model = models.LtSourceModel.objects.create(
             hazard_calculation=self.job, ordinal=i, sm_lt_path=[i])
         models.LtRealization(
             lt_model=source_model, ordinal=i, weight=1 / (i + 1),
             gsim_lt_path=[i]).save()
Ejemplo n.º 39
0
 def test(self):
     """Initializing sources with an inconsistent tectonic region type
     must raise a ValueError with an explanatory message."""
     cfg = helpers.get_data_path('bad_gsim/job.ini')
     job = helpers.get_job(cfg, username=getpass.getuser())
     calc = core.EventBasedHazardCalculator(job)
     # the error is expected from one of the two initialization steps
     with self.assertRaises(ValueError) as ctxt:
         calc.initialize_site_collection()
         calc.initialize_sources()
     errmsg = str(ctxt.exception)
     assert errmsg.startswith(
         "Found in 'source_model.xml' a tectonic region type "
         "'Active Shallow Crust' inconsistent with the ones"), errmsg
Ejemplo n.º 40
0
    def setUp(self):
        """Pre-execute a fake classical risk job and add two exposure
        assets for it."""
        self.job, _ = helpers.get_fake_risk_job(
            get_data_path('classical_psha_based_risk/job.ini'),
            get_data_path('simple_fault_demo_hazard/job.ini'))
        calculator = base.RiskCalculator(self.job)
        models.JobStats.objects.create(oq_job=self.job)
        calculator.pre_execute()

        common_fake_args = dict(
            exposure_model=self.job.exposure_model, taxonomy="test")

        # one asset near the region and one far away from it
        models.ExposureData(site=Point(0.5, 0.5), asset_ref="test1",
                            **common_fake_args).save()
        models.ExposureData(site=Point(179.1, 0), asset_ref="test2",
                            **common_fake_args).save()
Ejemplo n.º 41
0
 def test(self):
     """If the calculation fails, the underlying error must be written
     to the log file passed to run_calc."""
     cfg = helpers.get_data_path('event_based_hazard/job.ini')
     job_id, oq = actions.job_from_file(cfg, 'test_user')
     with tempfile.NamedTemporaryFile() as temp:
         # force a failure inside the engine's calculation runner
         with self.assertRaises(ZeroDivisionError), mock.patch(
                 'openquake.engine.engine._do_run_calc',
                 lambda *args: 1 / 0):
             engine.run_calc(job_id, oq, 'info', temp.name, exports=[])
         logged = open(temp.name).read()
         # make sure the real error has been logged
         self.assertIn('integer division or modulo by zero', logged)
Ejemplo n.º 42
0
 def test(self):
     """With a single realization, a warning must tell the user that
     quantile_hazard_curves is pointless."""
     cfg = helpers.get_data_path('simple_fault_demo_hazard/job.ini')
     with mock.patch('openquake.engine.logs.LOG.warn') as warn:
         helpers.run_job(
             cfg, number_of_logic_tree_samples=1,
             quantile_hazard_curves='0.1 0.2', hazard_maps='',
             uniform_hazard_spectra='')
     self.assertEqual(
         warn.call_args[0][0],
         'There is only one realization, the configuration'
         ' parameter quantile_hazard_curves should not be set')
Ejemplo n.º 43
0
 def test(self):
     """The job must complete even with one realization and no
     statistics requested.

     The bug can be reproduced with any hazard calculation profile
     which has the following parameters set:

     * number_of_logic_tree_samples = 1
     * mean_hazard_curves = false
     * quantile_hazard_curves =
     * poes = at least one PoE
     """
     cfg = helpers.get_data_path(
         'calculators/hazard/classical/haz_map_1rlz_no_stats.ini')
     completed_job = helpers.run_job(cfg).job
     self.assertEqual(completed_job.status, 'complete')
Ejemplo n.º 44
0
    def test_job_from_file(self):
        """Risk jobs built from file must warn when reusing another
        user's hazard and when quantile statistics are requested with a
        single hazard output, and must link back to the hazard job."""
        # make a hazard job
        haz_cfg = helpers.get_data_path('event_based_hazard/job.ini')
        haz_job = engine.job_from_file(haz_cfg, 'test_user')

        # make a fake Output
        out = models.Output.objects.create(oq_job=haz_job,
                                           display_name='fake',
                                           output_type='gmf')

        # make a risk job
        risk_cfg = helpers.get_data_path('event_based_risk/job.ini')
        with mock.patch.object(logs.LOG, 'warn') as warn:
            risk_job = engine.job_from_file(risk_cfg,
                                            'another_user',
                                            hazard_output_id=out.id)

        # make sure a warning is printed because you are using a hazard
        # generated by a different user
        self.assertEqual(
            warn.call_args[0],
            ('You are using a hazard calculation ran by %s', 'test_user'))

        with mock.patch.object(logs.LOG, 'warn') as warn:
            risk_job = engine.job_from_file(risk_cfg,
                                            'test_user',
                                            hazard_output_id=out.id,
                                            quantile_loss_curves='0.1 0.2')

        # make sure a warning is printed because you are using
        # quantile_loss_curves with a single hazard output
        self.assertEqual(
            warn.call_args[0][0],
            'quantile_loss_curves is on, but you passed a single hazard '
            'output: the statistics will not be computed')

        # make sure the hazard job is associated correctly
        self.assertEqual(risk_job.hazard_calculation.id, haz_job.id)
Ejemplo n.º 45
0
    def test(self):
        """When risk models are provided, sites and imls come from them."""
        job = engine.job_from_file(
            helpers.get_data_path('classical_job-sd-imt.ini'),
            helpers.default_user())
        job.is_running = True
        job.save()

        calc = calculators(job)
        calc.parse_risk_model()

        # the intensity measure types are taken from the risk model
        self.assertEqual(list(calc.oqparam.imtls), ['PGA'])

        # three exposure assets must have been imported
        self.assertEqual(calc.job.exposuremodel.exposuredata_set.count(), 3)

        return job
Ejemplo n.º 46
0
    def test_check_limits_classical(self):
        """Weights above the configured maxima must raise the limit errors.

        Based on a demo with 2 sites and 4 realizations.
        """
        cfg = helpers.get_data_path(
            'calculators/hazard/classical/haz_map_test_job.ini')
        job = helpers.get_job(cfg)
        models.JobStats.objects.create(oq_job=job)
        calc = calculators(job)
        in_weight, out_weight = calc.pre_execute()
        self.assertEqual(225, in_weight)
        self.assertEqual(24, out_weight)

        # an input weight above max_input_weight must be rejected
        calc.max_input_weight = 1
        with self.assertRaises(general.InputWeightLimit):
            calc.check_limits(in_weight, out_weight)

        # likewise for the output weight
        calc.max_input_weight = 1000
        calc.max_output_weight = 1
        with self.assertRaises(general.OutputWeightLimit):
            calc.check_limits(in_weight, out_weight)
Ejemplo n.º 47
0
    def test_disagg_hazard_export(self):
        """Run a disaggregation calculation and smoke-test its exporters."""
        target_dir = tempfile.mkdtemp()

        try:
            cfg = helpers.get_data_path('disaggregation/job.ini')

            # run the calculation in process to create something to export;
            # patch the environment with a context manager (consistent with
            # the other export tests) instead of mutating os.environ by hand
            with mock.patch.dict(os.environ, {'OQ_NO_DISTRIBUTE': '1'}):
                helpers.run_job(cfg)

            job = models.OqJob.objects.latest('id')
            self.assertEqual(job.status, 'complete')

            outputs = core.get_outputs(job.id)

            # Test curve export:
            curves = outputs.filter(output_type='hazard_curve')
            self.assertEqual(4, len(curves))
            curve_files = [check_export(curve.id, target_dir)
                           for curve in curves]

            self.assertEqual(4, len(curve_files))
            for f in curve_files:
                self._test_exported_file(f)

            # Test disagg matrix export:
            matrices = outputs.filter(output_type='disagg_matrix')
            self.assertEqual(8, len(matrices))
            disagg_files = [check_export(matrix.id, target_dir)
                            for matrix in matrices]

            self.assertEqual(8, len(disagg_files))
            for f in disagg_files:
                self._test_exported_file(f)
        finally:
            shutil.rmtree(target_dir)
Ejemplo n.º 48
0
    def test_site_collection_and_ses_collection(self):
        """Check the site parameters and the ruptures of the SES collection."""
        cfg = helpers.get_data_path('scenario_hazard/job.ini')
        job = helpers.get_job(cfg, username=getpass.getuser())
        models.JobStats.objects.create(oq_job=job)

        calc = scen_core.ScenarioHazardCalculator(job)
        calc.initialize_site_collection()
        sites = calc.site_collection

        # every site must carry the same parameter values
        self.assertTrue((sites.vs30 == 760).all())
        self.assertTrue(sites.vs30measured.all())
        self.assertTrue((sites.z1pt0 == 100).all())
        self.assertTrue((sites.z2pt5 == 5).all())

        # test SESCollection
        calc.create_ruptures()
        ses_coll = models.SESCollection.objects.get(
            output__oq_job=job, output__output_type='ses')
        expected_tags = [
            'scenario-0000000000',
            'scenario-0000000001',
            'scenario-0000000002',
            'scenario-0000000003',
            'scenario-0000000004',
            'scenario-0000000005',
            'scenario-0000000006',
            'scenario-0000000007',
            'scenario-0000000008',
            'scenario-0000000009',
        ]
        expected_seeds = [
            511025145, 1168723362, 794472670, 1296908407, 1343724121,
            140722153, 28278046, 1798451159, 556958504, 503221907
        ]
        # the collection contains a single ses
        for ses in ses_coll:
            self.assertEqual(1, ses.ordinal)
            for rup, tag, seed in zip(ses, expected_tags, expected_seeds):
                self.assertEqual(1, rup.ses_id)
                self.assertEqual(tag, rup.tag)
                self.assertEqual(seed, rup.seed)
Ejemplo n.º 49
0
    def test_get_site_collection_with_site_model(self):
        """A site model must yield one parameter value per point of interest."""
        cfg = helpers.get_data_path(
            'simple_fault_demo_hazard/job_with_site_model.ini')
        job = helpers.get_job(cfg)
        models.JobStats.objects.create(oq_job=job)
        calc = cls_core.ClassicalHazardCalculator(job)

        # Bootstrap the `hazard_site` table:
        calc.initialize_site_collection()
        calc.initialize_sources()

        sites = calc.site_collection
        # The site model is fairly big, so checking every value would be
        # excessive; instead just verify that each site parameter array has
        # exactly one entry per point of interest in the calculation.
        expected_len = len(sites)
        for attr in ('vs30', 'vs30measured', 'z1pt0', 'z2pt5'):
            self.assertEqual(expected_len, len(getattr(sites, attr)))
Ejemplo n.º 50
0
 def get_hazard_job(self):
     """Build a scenario hazard job seeded with GMFs read from CSV."""
     cfg = helpers.get_data_path("scenario_hazard/job.ini")
     job = helpers.get_job(cfg)
     helpers.create_gmf_from_csv(
         job, self._test_path('gmf_scenario.csv'), 'gmf_scenario')
     return job
Ejemplo n.º 51
0
    def test_classical_risk_export(self):
        """End-to-end classical risk calculation plus an export smoketest."""
        target_dir = tempfile.mkdtemp()
        try:
            haz_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_haz_classical.ini')
            risk_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_risk_classical.ini')

            haz_job = helpers.run_job(haz_cfg).job
            # Run the risk on all outputs produced by the haz calc:
            risk_job = helpers.run_job(risk_cfg,
                                       hazard_calculation_id=haz_job.id).job

            risk_outputs = models.Output.objects.filter(oq_job=risk_job)

            loss_curve_outputs = risk_outputs.filter(output_type='loss_curve')
            loss_map_outputs = risk_outputs.filter(output_type='loss_map')

            # 16 logic tree realizations + 1 mean + 2 quantiles = 19
            # + 19 insured loss curves
            self.assertEqual(38, loss_curve_outputs.count())
            # make sure the mean and quantile curve sets got created correctly
            loss_curves = models.LossCurve.objects.filter(
                output__oq_job=risk_job, insured=False)
            # sanity check
            self.assertEqual(19, loss_curves.count())

            insured_curves = models.LossCurve.objects.filter(
                output__oq_job=risk_job, insured=True)
            # sanity check
            self.assertEqual(19, insured_curves.count())

            # mean
            self.assertEqual(1, loss_curves.filter(statistics='mean').count())
            # quantiles
            self.assertEqual(
                2, loss_curves.filter(statistics='quantile').count())

            # mean
            self.assertEqual(
                1, insured_curves.filter(statistics='mean').count())
            # quantiles
            self.assertEqual(
                2, insured_curves.filter(statistics='quantile').count())

            # 16 logic tree realizations = 16 loss map + 1 mean loss
            # map + 2 quantile loss map
            self.assertEqual(19, loss_map_outputs.count())

            # 19 loss fractions
            loss_fraction_outputs = risk_outputs.filter(
                output_type="loss_fraction")
            self.assertEqual(19, loss_fraction_outputs.count())

            # Now try to export everything, just to do a "smoketest" of the
            # exporter code; comprehensions replace the original
            # append-in-a-loop constructions
            loss_curve_files = [core.export(o.id, target_dir, 'xml')
                                for o in loss_curve_outputs]
            loss_map_files = [core.export(o.id, target_dir, 'xml')
                              for o in loss_map_outputs]

            self.assertEqual(38, len(loss_curve_files))
            self.assertEqual(19, len(loss_map_files))

            for f in loss_curve_files:
                self._test_exported_file(f)
            for f in loss_map_files:
                self._test_exported_file(f)
        finally:
            shutil.rmtree(target_dir)
Ejemplo n.º 52
0
 def _setup_a_new_calculator(self):
     """Return a fresh (job, classical hazard calculator) pair."""
     job = helpers.get_job(
         helpers.get_data_path('simple_fault_demo_hazard/job.ini'),
         username=getpass.getuser())
     return job, core.ClassicalHazardCalculator(job)
Ejemplo n.º 53
0
    def test_export_for_event_based(self):
        """Run an event-based hazard calc and smoke-test all its exporters.

        The calculation computes SESs and GMFs; the exporters for both are
        called and the resulting files are verified. Since the XML writers
        (in `openquake.commonlib`) are concerned with correctly generating
        the XML, that is not tested here.
        """
        target_dir = tempfile.mkdtemp()

        try:
            cfg = helpers.get_data_path('event_based_hazard/job.ini')

            # run the calculation in process to create something to export
            with mock.patch.dict(os.environ, {'OQ_NO_DISTRIBUTE': '1'}):
                job = helpers.run_job(cfg,
                                      maximum_distance=1,
                                      ses_per_logic_tree_path=1,
                                      number_of_logic_tree_samples=1).job
            self.assertEqual(job.status, 'complete')

            # 1 SES + 1 GMF + 1 hazard_curve_multi + 2 hazard_curve +
            # 4 hazard maps (with poes 0.1, 0.2 and IMT PGA, SA(0.1))
            outputs = core.get_outputs(job.id)
            self.assertEqual(9, len(outputs))

            # SESs
            ses_outputs = outputs.filter(output_type='ses')
            self.assertEqual(1, len(ses_outputs))

            # comprehension replaces the original append loop
            exported_files = [check_export(ses_output.id, target_dir)
                              for ses_output in ses_outputs]

            self.assertEqual(1, len(exported_files))

            for f in exported_files:
                self._test_exported_file(f)

            # GMFs
            gmf_outputs = outputs.filter(output_type='gmf')
            self.assertEqual(1, len(gmf_outputs))

            exported_files = [check_export(gmf_output.id, target_dir)
                              for gmf_output in gmf_outputs]

            self.assertEqual(1, len(exported_files))
            # Check the file paths exist, are absolute, and the files aren't
            # empty.
            for f in exported_files:
                self._test_exported_file(f)

            # check the exact values of the GMFs
            [gmfset1] = gmf_outputs[0].gmf
            self.check_file_content('expected_gmfset_1.txt', str(gmfset1))

            # Hazard curves
            haz_curves = outputs.filter(output_type='hazard_curve')
            self.assertEqual(2, haz_curves.count())
            for curve in haz_curves:
                self._test_exported_file(check_export(curve.id, target_dir))

            # Hazard maps
            haz_maps = outputs.filter(output_type='hazard_map')
            self.assertEqual(4, haz_maps.count())
            for hmap in haz_maps:
                self._test_exported_file(check_export(hmap.id, target_dir))
        finally:
            shutil.rmtree(target_dir)
Ejemplo n.º 54
0
    def test_classical_hazard_export(self):
        """Run a classical hazard calc and verify every exporter output.

        Bug fix: the original rebound ``hc_files`` from the hazard-curve
        files to the multi-curve files before checking them, so the ten
        plain hazard-curve files were never passed through
        ``_test_exported_file``. Each output set now has its own list and
        every exported file is verified (a missing length check on the UHS
        files is also added). Since the hazard curve XML writer is
        concerned with correctly generating XML, that is not tested here.
        """
        target_dir = tempfile.mkdtemp()

        try:
            cfg = helpers.get_data_path('simple_fault_demo_hazard/job.ini')

            # run the calculation to create something to export
            helpers.run_job(cfg)

            job = models.OqJob.objects.latest('id')
            self.assertEqual(job.status, 'complete')

            outputs = core.get_outputs(job.id)

            # 10 hazard curves, 20 maps, 10 uhs, 5 multi curves
            expected_outputs = 45
            self.assertEqual(expected_outputs, outputs.count())

            # Number of curves:
            # (2 imts * 2 realizations)
            # + (2 imts * (1 mean + 2 quantiles)
            # = 10
            curves = outputs.filter(output_type='hazard_curve')
            self.assertEqual(10, curves.count())

            # Number of multi-curves
            # (2 realizations + 1 mean + 2 quantiles)
            multi_curves = outputs.filter(output_type="hazard_curve_multi")
            self.assertEqual(5, multi_curves.count())

            # Number of maps:
            # (2 poes * 2 imts * 2 realizations)
            # + (2 poes * 2 imts * (1 mean + 2 quantiles))
            # = 20
            maps = outputs.filter(output_type='hazard_map')
            self.assertEqual(20, maps.count())

            # Number of UHS:
            # (20 maps_PGA_SA / 2 poes)
            # = 10
            uhs = outputs.filter(output_type='uh_spectra')
            self.assertEqual(10, uhs.count())

            # Test hazard curve export:
            hc_files = [check_export(curve.id, target_dir)
                        for curve in curves]
            self.assertEqual(10, len(hc_files))
            for f in hc_files:
                self._test_exported_file(f)

            # Test multi hazard curve export:
            multi_hc_files = [check_export(curve.id, target_dir)
                              for curve in multi_curves]
            self.assertEqual(5, len(multi_hc_files))
            for f in multi_hc_files:
                self._test_exported_file(f)

            # Test hazard map export:
            hm_files = [check_export(haz_map.id, target_dir)
                        for haz_map in maps]
            self.assertEqual(20, len(hm_files))
            for f in hm_files:
                self._test_exported_file(f)

            # Test UHS export:
            uhs_files = [check_export(u.id, target_dir) for u in uhs]
            self.assertEqual(10, len(uhs_files))
            for f in uhs_files:
                self._test_exported_file(f)
        finally:
            shutil.rmtree(target_dir)
Ejemplo n.º 55
0
 def test(self):
     """A tiny maximum_distance must make the calculation blow up."""
     cfg_path = helpers.get_data_path('classical_job.ini')
     with mock.patch.dict(os.environ, {'OQ_NO_DISTRIBUTE': '1'}):
         with self.assertRaises(RuntimeError):
             helpers.run_job(cfg_path, maximum_distance=1)
Ejemplo n.º 56
0
 def get_hazard_job(self):
     """Build a scenario hazard job with 1000 GMFs loaded from CSV."""
     cfg = helpers.get_data_path("scenario_hazard/job.ini")
     job = helpers.get_job(cfg, number_of_ground_motion_fields=1000)
     helpers.create_gmf_from_csv(
         job, self._test_path('gmf_scenario.csv'), 'gmf_scenario')
     return job
Ejemplo n.º 57
0
    def test_event_based_risk_export(self):
        """End-to-end event-based risk calculation plus an export smoketest."""
        target_dir = tempfile.mkdtemp()
        try:
            haz_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_haz_event_based.ini')
            risk_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_risk_event_based.ini')

            haz_job = helpers.run_job(haz_cfg).job
            # Run the risk on all outputs produced by the haz calc:
            risk_job = helpers.run_job(risk_cfg,
                                       hazard_calculation_id=haz_job.id).job

            risk_outputs = models.Output.objects.filter(oq_job=risk_job)

            agg_loss_curve_outputs = risk_outputs.filter(
                output_type='agg_loss_curve')
            loss_curve_outputs = risk_outputs.filter(output_type='loss_curve')
            loss_map_outputs = risk_outputs.filter(output_type='loss_map')

            # (1 mean + 2 quantiles) * 2 (as there also insured curves)
            self.assertEqual(6, loss_curve_outputs.count())

            # 16 rlzs + 16 (due to insured curves)
            event_loss_curve_outputs = risk_outputs.filter(
                output_type='event_loss_curve')
            self.assertEqual(32, event_loss_curve_outputs.count())
            self.assertEqual(16, agg_loss_curve_outputs.count())

            # make sure the mean and quantile curve sets got created correctly
            loss_curves = models.LossCurve.objects.filter(
                output__oq_job=risk_job)
            # sanity check (16 aggregate loss curve + 38 loss curves)
            self.assertEqual(54, loss_curves.count())
            # mean
            self.assertEqual(2, loss_curves.filter(statistics='mean').count())
            # quantiles
            self.assertEqual(
                4, loss_curves.filter(statistics='quantile').count())

            # 16 logic tree realizations = 16 loss map + 1 mean loss
            # map + 2 quantile loss map
            self.assertEqual(19, loss_map_outputs.count())

            # 16 event loss table (1 per rlz)
            event_loss_tables = risk_outputs.filter(output_type="event_loss")
            self.assertEqual(16, event_loss_tables.count())

            # 32 loss fractions
            loss_fraction_outputs = risk_outputs.filter(
                output_type="loss_fraction")
            self.assertEqual(32, loss_fraction_outputs.count())

            # Now try to export everything, just to do a "smoketest" of the
            # exporter code; comprehensions replace the original
            # append-in-a-loop constructions, preserving export order
            loss_curve_files = (
                [core.export(o.id, target_dir, 'xml')
                 for o in loss_curve_outputs] +
                [core.export(o.id, target_dir, 'xml')
                 for o in loss_fraction_outputs] +
                [core.export(o.id, target_dir, 'xml')
                 for o in event_loss_curve_outputs])

            agg_loss_curve_files = [core.export(o.id, target_dir, 'xml')
                                    for o in agg_loss_curve_outputs]

            event_loss_table_files = [core.export(o.id, target_dir, 'csv')
                                      for o in event_loss_tables]

            loss_map_files = [core.export(o.id, target_dir, 'xml')
                              for o in loss_map_outputs]

            # 6 loss curves + 32 loss fractions + 32 event loss curves = 70
            self.assertEqual(70, len(loss_curve_files))
            self.assertEqual(16, len(agg_loss_curve_files))
            self.assertEqual(16, len(event_loss_table_files))
            self.assertEqual(19, len(loss_map_files))

            for f in loss_curve_files:
                self._test_exported_file(f)
            for f in loss_map_files:
                self._test_exported_file(f)
        finally:
            shutil.rmtree(target_dir)
Ejemplo n.º 58
0
 def get_hazard_job(self):
     """Build a scenario hazard job seeded with case_1's GMF CSV file."""
     job = helpers.get_job(helpers.get_data_path("scenario_hazard/job.ini"))
     csv_path = os.path.join(
         os.path.dirname(case_1.__file__), 'gmf_scenario.csv')
     helpers.create_gmf_from_csv(job, csv_path, 'gmf_scenario')
     return job
Ejemplo n.º 59
0
 @classmethod
 def setUpClass(cls):
     """Cache the hazard and risk config paths once for the test class.

     ``unittest`` invokes ``cls.setUpClass()`` on the class object, so the
     method must be a classmethod; the decorator was missing here.
     """
     cls.hazard_cfg = helpers.get_data_path(
         'simple_fault_demo_hazard/job.ini')
     cls.risk_cfg = helpers.get_data_path(
         'classical_psha_based_risk/job.ini')
Ejemplo n.º 60
0
 def _setup_a_new_calculator(self):
     """Return a fresh (job, disaggregation hazard calculator) pair."""
     job = helpers.get_job(
         helpers.get_data_path('disaggregation/job.ini'),
         username=getpass.getuser())
     return job, core.DisaggHazardCalculator(job)