def test_pre_execute_check_imts_raises(self):
    """pre_execute() must raise ValueError when the selected hazard
    output's IMT (PGV here) matches none of the IMTs required by the
    vulnerability model (which needs SA(0.1))."""
    # Build a hazard job with a single mean hazard-curve output whose
    # IMT is deliberately incompatible with the risk vulnerability model.
    haz_job = engine.prepare_job()
    cfg = helpers.get_data_path('classical_job.ini')
    params, files = engine.parse_config(open(cfg, 'r'))
    haz_job.hazard_calculation = engine.create_hazard_calculation(
        haz_job.owner, params, files.values())
    haz_job.save()

    hazard_curve_output = models.Output.objects.create_output(
        haz_job, 'test_hazard_curve', 'hazard_curve'
    )
    models.HazardCurve.objects.create(
        output=hazard_curve_output,
        investigation_time=50.0,
        imt='PGV',  # the vulnerability model only defines SA(0.1)
        statistics='mean'
    )

    # Point the risk job at the incompatible hazard output.
    cfg = helpers.get_data_path(
        'end-to-end-hazard-risk/job_risk_classical.ini')
    risk_job = helpers.get_risk_job(
        cfg, hazard_output_id=hazard_curve_output.id
    )
    models.JobStats.objects.create(oq_job=risk_job)
    calc = classical.ClassicalRiskCalculator(risk_job)

    # Check for compatibility between the IMTs defined in the vulnerability
    # model and the chosen hazard output (--hazard-output-id)
    with self.assertRaises(ValueError) as ar:
        calc.pre_execute()
    self.assertEqual(
        "There is no hazard output for: SA(0.1). "
        "The available IMTs are: PGA.",
        ar.exception.message)
def setUp(self):
    """Create a fake event-based BCR risk job, its calculator and the
    associated JobStats record."""
    risk_ini = get_data_path('event_based_bcr/job.ini')
    hazard_ini = get_data_path('event_based_hazard/job.ini')
    self.job, _ = helpers.get_fake_risk_job(
        risk_ini, hazard_ini, output_type="gmf")
    self.calculator = core.EventBasedBCRRiskCalculator(self.job)
    models.JobStats.objects.create(oq_job=self.job)
def setUp(self):
    """Create a fake classical BCR risk job, its calculator and the
    associated JobStats record."""
    risk_ini = get_data_path('classical_bcr/job.ini')
    hazard_ini = get_data_path('simple_fault_demo_hazard/job.ini')
    self.job, _ = helpers.get_fake_risk_job(risk_ini, hazard_ini)
    self.calculator = classical_bcr.ClassicalBCRRiskCalculator(self.job)
    models.JobStats.objects.create(oq_job=self.job)
def test_pre_execute_check_imts_no_errors(self):
    """pre_execute() must accept a hazard output whose IMT
    (SA, period 0.025) is compatible with the vulnerability model."""
    haz_job = engine.prepare_job()

    cfg = helpers.get_data_path(
        'end-to-end-hazard-risk/job_haz_classical.ini')
    params, files = engine.parse_config(open(cfg, 'r'))
    haz_job.hazard_calculation = engine.create_hazard_calculation(
        haz_job.owner, params, files.values())
    haz_job.save()

    hazard_curve_output = models.Output.objects.create_output(
        haz_job, 'test_hazard_curve', 'hazard_curve'
    )
    models.HazardCurve.objects.create(
        output=hazard_curve_output,
        investigation_time=50.0,
        # this imt is compatible with the vuln model
        imt='SA',
        sa_period=0.025,
        sa_damping=5.0,
        statistics='mean'
    )

    cfg = helpers.get_data_path(
        'end-to-end-hazard-risk/job_risk_classical.ini')
    risk_job = helpers.get_risk_job(
        cfg, hazard_output_id=hazard_curve_output.id
    )
    models.JobStats.objects.create(oq_job=risk_job)
    calc = classical.ClassicalRiskCalculator(risk_job)

    # In contrast to the test above (`test_pre_execute_check_imts_raises`),
    # we expect no errors to be raised.
    calc.pre_execute()
def setUp(self):
    """Load the plain and the with-includes job configs and remember
    their generated super-config files for later cleanup."""
    self.generated_files = []
    self.job = Job.from_file(helpers.get_data_path(CONFIG_FILE))
    self.job_with_includes = Job.from_file(
        helpers.get_data_path(CONFIG_WITH_INCLUDES))
    self.generated_files.append(self.job.super_config_path)
    self.generated_files.append(self.job_with_includes.super_config_path)
def test_with_input_type(self):
    """Filtering inputs4rcalc by input_type='exposure' yields exactly
    one input for this risk calculation."""
    job, _files = helpers.get_fake_risk_job(
        get_data_path('classical_psha_based_risk/job.ini'),
        get_data_path('simple_fault_demo_hazard/job.ini'))
    exposure_inputs = models.inputs4rcalc(
        job.risk_calculation.id, input_type='exposure')
    self.assertEqual(1, exposure_inputs.count())
def setUp(self): client = kvs.get_client() # Delete managed job id info so we can predict the job key # which will be allocated for us client.delete(kvs.tokens.CURRENT_JOBS) self.generated_files = [] self.job = helpers.job_from_file(helpers.get_data_path(CONFIG_FILE)) self.job_with_includes = helpers.job_from_file(helpers.get_data_path(CONFIG_WITH_INCLUDES))
def setUp(self):
    """Build a fake event-based risk job, run pre_execute and mark the
    job as running/executing so execute-phase tests can proceed."""
    self.job, _ = helpers.get_fake_risk_job(
        get_data_path("event_based_risk/job.ini"),
        get_data_path("event_based_hazard/job.ini"),
        output_type="gmf"
    )
    self.calculator = event_based.EventBasedRiskCalculator(self.job)
    models.JobStats.objects.create(oq_job=self.job)
    self.calculator.pre_execute()

    self.job.is_running = True
    self.job.status = "executing"
    self.job.save()
def setUp(self):
    """Collect the compulsory and the baseline keyword arguments used by
    the classical risk form-validation tests."""
    job, _ = helpers.get_fake_risk_job(
        get_data_path('classical_psha_based_risk/job.ini'),
        get_data_path('simple_fault_demo_hazard/job.ini'))
    self.compulsory_arguments = dict(lrem_steps_per_interval=5)
    self.other_args = dict(
        calculation_mode="classical",
        region_constraint=(
            'POLYGON((-122.0 38.113, -122.114 38.113, -122.57 38.111, '
            '-122.0 38.113))'),
        hazard_output=job.risk_calculation.hazard_output)
def setUp(self):
    """Build a scenario risk job, run pre_execute and flag the job as
    running/executing for the execute-phase tests."""
    risk_ini = get_data_path('scenario_risk/job.ini')
    hazard_ini = get_data_path('scenario_hazard/job.ini')
    self.job, _ = helpers.get_fake_risk_job(
        risk_ini, hazard_ini, output_type="gmf_scenario")
    self.calculator = scenario.ScenarioRiskCalculator(self.job)
    models.JobStats.objects.create(oq_job=self.job)
    self.calculator.pre_execute()
    self.job.is_running = True
    self.job.status = 'executing'
    self.job.save()
def setUp(self):
    """Build a fake event-based risk job, run pre_execute and mark the
    job as running/executing so execute-phase tests can proceed."""
    self.job, _ = helpers.get_fake_risk_job(
        get_data_path('event_based_risk/job.ini'),
        get_data_path('event_based_hazard/job.ini'),
        output_type="gmf")
    self.calculator = event_based.EventBasedRiskCalculator(self.job)
    models.JobStats.objects.create(oq_job=self.job)
    self.calculator.pre_execute()
    self.job.is_running = True
    self.job.status = 'executing'
    self.job.save()
def setUp(self):
    """Collect the compulsory and the baseline keyword arguments used by
    the classical risk form-validation tests."""
    job, _ = helpers.get_fake_risk_job(
        get_data_path('classical_psha_based_risk/job.ini'),
        get_data_path('simple_fault_demo_hazard/job.ini')
    )
    self.compulsory_arguments = dict(
        lrem_steps_per_interval=5)

    self.other_args = dict(
        calculation_mode="classical",
        region_constraint=(
            'POLYGON((-122.0 38.113, -122.114 38.113, -122.57 38.111, '
            '-122.0 38.113))'),
        hazard_output=job.risk_calculation.hazard_output)
def test_a_few_inputs(self):
    """inputs4rcalc with no filter returns every input of the risk
    calculation, id-for-id."""
    job, files = helpers.get_fake_risk_job(
        get_data_path('classical_psha_based_risk/job.ini'),
        get_data_path('simple_fault_demo_hazard/job.ini'))
    calc_id = job.risk_calculation.id
    expected_ids = sorted(f.id for f in files.values())
    actual_ids = sorted(inp.id for inp in models.inputs4rcalc(calc_id))
    self.assertEqual(expected_ids, actual_ids)
def test_with_input_type(self):
    """inputs4rcalc filtered by input_type='exposure' returns exactly
    the exposure input(s) of the risk calculation."""
    job, files = helpers.get_fake_risk_job(
        get_data_path('classical_psha_based_risk/job.ini'),
        get_data_path('simple_fault_demo_hazard/job.ini'))
    rc = job.risk_calculation

    # It should only be 1 id, actually.
    expected_ids = [x.id for x in files.values()
                    if x.input_type == 'exposure']

    inputs = models.inputs4rcalc(rc.id, input_type='exposure')

    actual_ids = sorted([x.id for x in inputs])

    self.assertEqual(expected_ids, actual_ids)
def test_validate_warns(self):
    """validate() warns about parameters irrelevant to the calc mode."""
    # Test that `validate` raises warnings if unnecessary parameters are
    # specified for a given calculation.
    # For example, `ses_per_logic_tree_path` is an event-based hazard
    # param; if this param is specified for a classical hazard job, a
    # warning should be raised.
    cfg_file = helpers.get_data_path('simple_fault_demo_hazard/job.ini')
    job = engine.prepare_job()
    params = engine.parse_config(open(cfg_file, 'r'))
    # Add a few superfluous parameters:
    params['ses_per_logic_tree_path'] = 5
    params['ground_motion_correlation_model'] = 'JB2009'
    calculation = engine.create_calculation(models.HazardCalculation,
                                            params)
    job.hazard_calculation = calculation
    job.save()

    with warnings.catch_warnings(record=True) as w:
        validation.validate(job, 'hazard', params, ['xml'])

    expected_warnings = [
        "Unknown parameter '%s' for calculation mode 'classical'."
        " Ignoring." % x for x in ('ses_per_logic_tree_path',
                                   'ground_motion_correlation_model')
    ]

    actual_warnings = [m.message.message for m in w]
    self.assertEqual(sorted(expected_warnings), sorted(actual_warnings))
def test_prepares_blocks_using_the_input_region(self):
    """_partition() splits the input region into a single block whose
    sites match the RegionConstraint built from the job's
    REGION_VERTEX / REGION_GRID_SPACING parameters."""
    # Fix: removed leftover debug `print` statements and replaced the
    # unprofessional docstring; the exercised behavior is unchanged.
    block_path = helpers.get_data_path(BLOCK_SPLIT_TEST_FILE)
    a_job = Job.from_file(block_path)
    self.generated_files.append(a_job.super_config_path)

    verts = [float(x) for x in a_job.params['REGION_VERTEX'].split(",")]
    # Flips lon and lat, and builds a list of coord tuples
    coords = zip(verts[1::2], verts[::2])
    expected = shapes.RegionConstraint.from_coordinates(coords)
    expected.cell_size = float(a_job.params['REGION_GRID_SPACING'])

    # Materialize the constraint's site iterator for comparison.
    expected_sites = [site for site in expected]

    a_job._partition()
    blocks_keys = a_job.blocks_keys

    self.assertEqual(1, len(blocks_keys))
    self.assertEqual(job.Block(expected_sites),
                     job.Block.from_kvs(blocks_keys[0]))
def setUp(self): client = kvs.get_client() # Delete managed job id info so we can predict the job key # which will be allocated for us client.delete(kvs.tokens.CURRENT_JOBS) self.generated_files = [] job = engine.prepare_job() jp, params, sections = import_job_profile(helpers.get_data_path(CONFIG_FILE), job) self.job_ctxt = JobContext(params, job.id, sections=sections, oq_job_profile=jp, oq_job=job) job = engine.prepare_job() jp, params, sections = import_job_profile(helpers.get_data_path(CONFIG_WITH_INCLUDES), job) self.job_ctxt_with_includes = JobContext(params, job.id, sections=sections, oq_job_profile=jp, oq_job=job)
def test_successful_job_lifecycle(self):
    """A job whose launch succeeds moves pending -> running -> succeeded."""
    with patch('openquake.job.Job.from_file') as from_file:

        # called in place of Job.launch
        def test_status_running_and_succeed():
            self.assertEquals('running', self._job_status())
            return []

        # replaces Job.launch with a mock
        def patch_job_launch(*args, **kwargs):
            self.job = self.job_from_file(*args, **kwargs)
            self.job.launch = mock.Mock(
                side_effect=test_status_running_and_succeed)
            self.assertEquals('pending', self._job_status())
            return self.job

        from_file.side_effect = patch_job_launch

        with patch('openquake.job.spawn_job_supervisor'):
            run_job(helpers.get_data_path(CONFIG_FILE), 'db')

        self.assertEquals(1, self.job.launch.call_count)
        self.assertEquals('succeeded', self._job_status())
def test_export_for_scenario(self):
    """Run a scenario hazard calc in-process and check that its single
    GMF output exports to a valid, non-empty NRML file with 20 gmf
    elements."""
    target_dir = tempfile.mkdtemp()

    try:
        cfg = helpers.get_data_path('scenario_hazard/job.ini')

        # run the calculation in process to create something to export
        os.environ['OQ_NO_DISTRIBUTE'] = '1'
        try:
            helpers.run_hazard_job(cfg)
        finally:
            del os.environ['OQ_NO_DISTRIBUTE']
        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')

        outputs = export_core.get_outputs(job.id)
        self.assertEqual(1, len(outputs))  # 1 GMF

        gmf_outputs = outputs.filter(output_type='gmf_scenario')
        self.assertEqual(1, len(gmf_outputs))

        exported_file = check_export(gmf_outputs[0].id, target_dir)

        # Check the file paths exist, is absolute, and the file isn't
        # empty.
        self._test_exported_file(exported_file)

        # Check for the correct number of GMFs in the file:
        tree = etree.parse(exported_file)
        self.assertEqual(20, number_of('nrml:gmf', tree))
    finally:
        shutil.rmtree(target_dir)
def test_http_handler_writes_a_file(self):
    """HTTPHandler.handle() writes the fetched content to the target
    path; the HTTP connection is stubbed to read a local file."""

    class StubbedHTTPConnection(StubbedGetter):
        # Minimal stand-in for httplib's connection/response objects:
        # every method returns self so the handler's call chain works.

        def __enter__(self):
            return self

        def __exit__(self, *args):
            pass

        def request(self, req_type, path):
            # Remember the requested path so read() can serve it.
            self.remote_path = path
            return self

        def getresponse(self):
            return self

        def read(self):
            # Serve the "remote" content from the local data file.
            with open(self.remote_path, "r") as reader:
                return reader.read()

    expected_path = "/tmp/fake_file"
    remote_path = "http://localhost/%s" % helpers.get_data_path(
        "config.gem")
    url = urlparse.urlparse(remote_path)
    http_handler = handlers.HTTPHandler(url, expected_path)
    guaranteed_file = http_handler.handle(getter=StubbedHTTPConnection)
    self.assertTrue(os.path.isfile(guaranteed_file))
    os.unlink(guaranteed_file)
def test_failed_db_job_lifecycle(self):
    """A job whose launch raises a SQLAlchemy error ends up 'failed'."""
    with patch('openquake.job.Job.from_file') as from_file:

        # called in place of Job.launch
        def test_status_running_and_fail():
            self.assertEquals('running', self._job_status())
            session = get_db_session("uiapi", "writer")
            # Query for a non-existent row to trigger a database error.
            session.query(OqJob).filter(OqJob.id == -1).one()

        # replaces Job.launch with a mock
        def patch_job_launch(*args, **kwargs):
            self.job = self.job_from_file(*args, **kwargs)
            self.job.launch = mock.Mock(
                side_effect=test_status_running_and_fail)
            self.assertEquals('pending', self._job_status())
            return self.job

        from_file.side_effect = patch_job_launch
        self.assertRaises(sqlalchemy.exc.SQLAlchemyError, run_job,
                          helpers.get_data_path(CONFIG_FILE), 'db')

        self.assertEquals(1, self.job.launch.call_count)
        self.assertEquals('failed', self._job_status())
def test_serialize(self):
    """Serialized collapse-map XML must match the expected fixture and
    validate against the NRML schema."""
    # Fix: read both files via `with` so the handles are closed instead
    # of leaking (the original used bare open(...).readlines()).
    expected_file = helpers.get_data_path(
        "expected-collapse-map.xml")
    with open(expected_file, "r") as fh:
        expected_text = fh.readlines()

    asset_1, asset_2, asset_3, asset_4 = self.make_assets()
    self.make_map()
    self.make_data(asset_1, 1.6, 1.7)
    self.make_data(asset_2, 2.9, 3.1)
    self.make_data(asset_3, 4.9, 5.1)
    self.make_data(asset_4, 10.6, 11.7)

    try:
        _, result_xml = tempfile.mkstemp()

        writer = CollapseMapXMLWriter(result_xml, "ebl1")
        writer.serialize(self.data)

        with open(result_xml, "r") as fh:
            actual_text = fh.readlines()
        self.assertEqual(expected_text, actual_text)

        self.assertTrue(xml.validates_against_xml_schema(
            result_xml))
    finally:
        os.unlink(result_xml)
def test_serialize(self):
    """Serialized total damage-distribution XML must match the expected
    fixture and validate against the NRML schema."""
    # Fix: read both files via `with` so the handles are closed instead
    # of leaking (the original used bare open(...).readlines()).
    expected_file = helpers.get_data_path(
        "expected-dmg-dist-total.xml")
    with open(expected_file, "r") as fh:
        expected_text = fh.readlines()

    self.make_dist()
    self.make_data("no_damage", 1.0, 1.6)
    self.make_data("slight", 34.8, 18.3)
    self.make_data("moderate", 64.2, 19.8)
    self.make_data("extensive", 64.3, 19.7)
    self.make_data("complete", 64.3, 19.7)

    try:
        _, result_xml = tempfile.mkstemp()

        writer = DmgDistTotalXMLWriter(result_xml, "ebl1",
                                       self.damage_states)
        writer.serialize(self.data)

        with open(result_xml, "r") as fh:
            actual_text = fh.readlines()
        self.assertEqual(expected_text, actual_text)

        self.assertTrue(xml.validates_against_xml_schema(
            result_xml))
    finally:
        os.unlink(result_xml)
def test_failed_job_lifecycle(self):
    """A job whose launch raises a generic exception ends up 'failed'."""
    with patch('openquake.job.Job.from_file') as from_file:

        # called in place of Job.launch
        def test_status_running_and_fail():
            self.assertEquals('running', self._job_status())
            raise Exception('OMG!')

        # replaces Job.launch with a mock
        def patch_job_launch(*args, **kwargs):
            self.job = self.job_from_file(*args, **kwargs)
            self.job.launch = mock.Mock(
                side_effect=test_status_running_and_fail)
            self.assertEquals('pending', self._job_status())
            return self.job

        from_file.side_effect = patch_job_launch
        self.assertRaises(Exception, run_job,
                          helpers.get_data_path(CONFIG_FILE), 'db')

        self.assertEquals(1, self.job.launch.call_count)
        self.assertEquals('failed', self._job_status())
def get_hazard_job(self):
    """Create an event-based hazard job and populate it with ground
    motion fields read from the bundled gmf.csv fixture."""
    job = helpers.get_hazard_job(
        helpers.get_data_path("event_based_hazard/job.ini"))
    helpers.create_gmf_from_csv(job, os.path.join(
        os.path.dirname(__file__), 'gmf.csv'))
    return job
def setUp(self):
    """Create an event-based hazard job/calculator and prime the site
    collection cache with a 5-site collection."""
    self.cfg = helpers.get_data_path('event_based_hazard/job_2.ini')
    self.job = helpers.get_hazard_job(self.cfg, username=getpass.getuser())
    self.calc = core.EventBasedHazardCalculator(self.job)
    hc_id = self.job.hazard_calculation.id
    # Pre-populate the cache so the calculator does not recompute sites.
    models.SiteCollection.cache[hc_id] = make_site_coll(0, 0, n=5)
    models.JobStats.objects.create(oq_job=self.job)
def setUp(self):
    """Read one line of serialized GMF JSON and build the target grid."""
    # Fix: close the fixture file via `with` instead of leaking the
    # handle returned by a bare open(...).readline().
    with open(helpers.get_data_path("gmfs.json")) as fh:
        self.gmf_string = fh.readline()
    region = shapes.Region.from_coordinates(
        [(-118.30, 34.12), (-118.18, 34.12),
         (-118.18, 34.00), (-118.30, 34.00)])
    region.cell_size = 0.02

    self.grid = region.grid
def setUp(self):
    """Patch the supervisor's collaborators for the duration of a test.

    Patch a few methods here and restore them in the tearDown to avoid
    too many nested with.
    See http://www.voidspace.org.uk/python/mock/patch.html
    #patch-methods-start-and-stop
    """
    self.patchers = []

    def start_patch(attr_path):
        # Start a patcher, keep it for tearDown, and expose the mock as
        # an attribute named after the patched callable.
        _, attr = attr_path.rsplit('.', 1)
        patcher = patch(attr_path)
        self.patchers.append(patcher)
        setattr(self, attr, patcher.start())

    start_patch('openquake.engine.supervising.is_pid_running')

    # Patch the actions taken by the supervisor
    # Fix: replaced the fragile backslash-inside-string continuation
    # with implicit string concatenation (same value), matching the
    # style already used for 'update_job_status' below.
    start_patch('openquake.engine.supervising.supervisor.'
                'record_job_stop_time')
    start_patch(
        'openquake.engine.supervising.supervisor.cleanup_after_job')
    start_patch('openquake.engine.supervising.supervisor.terminate_job')
    start_patch('openquake.engine.supervising.supervisor.get_job_status')
    start_patch('openquake.engine.supervising.supervisor'
                '.update_job_status')

    logging.root.setLevel(logging.CRITICAL)

    cfg = get_data_path('end-to-end-hazard-risk/job_haz_classical.ini')
    self.job = get_hazard_job(cfg)
def test_store_site_model(self):
    """store_site_model() persists every record from site_model.xml and
    returns the ids of the newly inserted rows, in order."""
    # Setup
    site_model = helpers.get_data_path("site_model.xml")

    exp_site_model = [
        dict(lon=-122.5, lat=37.5, vs30=800.0, vs30_type="measured",
             z1pt0=100.0, z2pt5=5.0),
        dict(lon=-122.6, lat=37.6, vs30=801.0, vs30_type="measured",
             z1pt0=101.0, z2pt5=5.1),
        dict(lon=-122.7, lat=37.7, vs30=802.0, vs30_type="measured",
             z1pt0=102.0, z2pt5=5.2),
        dict(lon=-122.8, lat=37.8, vs30=803.0, vs30_type="measured",
             z1pt0=103.0, z2pt5=5.3),
        dict(lon=-122.9, lat=37.9, vs30=804.0, vs30_type="measured",
             z1pt0=104.0, z2pt5=5.4),
    ]

    job = models.OqJob.objects.create(user_name="openquake")
    ids = general.store_site_model(job, site_model)

    actual_site_model = models.SiteModel.objects.filter(
        job=job).order_by("id")

    for i, exp in enumerate(exp_site_model):
        act = actual_site_model[i]

        self.assertAlmostEqual(exp["lon"], act.location.x)
        self.assertAlmostEqual(exp["lat"], act.location.y)
        self.assertAlmostEqual(exp["vs30"], act.vs30)
        self.assertEqual(exp["vs30_type"], act.vs30_type)
        self.assertAlmostEqual(exp["z1pt0"], act.z1pt0)
        self.assertAlmostEqual(exp["z2pt5"], act.z2pt5)

    # last, check that the `store_site_model` function returns all of the
    # newly-inserted records
    for i, s in enumerate(ids):
        self.assertEqual(s, actual_site_model[i].id)
def get_hazard_job(self):
    """Create a classical hazard job with one mean PGA hazard curve
    (IML/PoE pairs below) at POINT(1 1) and return the job."""
    job = helpers.get_hazard_job(
        helpers.get_data_path("simple_fault_demo_hazard/job.ini"))

    # (IML, PoE) pairs used to seed the hazard curve.
    hazard_curve = [
        (0.001, 0.0398612669790014),
        (0.01, 0.039861266979001400), (0.05, 0.039728757480298900),
        (0.10, 0.029613426625612500), (0.15, 0.019827328756491600),
        (0.20, 0.013062270161451900), (0.25, 0.008655387950000430),
        (0.30, 0.005898520593689670), (0.35, 0.004061698589511780),
        (0.40, 0.002811727179526820), (0.45, 0.001995117417776690),
        (0.50, 0.001358705972845710), (0.55, 0.000989667841573727),
        (0.60, 0.000757544444296432), (0.70, 0.000272824002045979),
        (0.80, 0.00), (0.9, 0.00), (1.0, 0.00)]

    models.HazardCurveData.objects.create(
        hazard_curve=models.HazardCurve.objects.create(
            output=models.Output.objects.create_output(
                job, "Test Hazard curve", "hazard_curve"),
            investigation_time=50,
            imt="PGA", imls=[hz[0] for hz in hazard_curve],
            statistics="mean"),
        poes=[hz[1] for hz in hazard_curve],
        location="POINT(1 1)")

    return job
def get_hazard_job(self):
    """Create a classical hazard job with one mean PGA hazard curve
    (IML/PoE pairs below) at POINT(1 1) and return the job."""
    job = helpers.get_hazard_job(
        helpers.get_data_path("simple_fault_demo_hazard/job.ini"))

    # (IML, PoE) pairs used to seed the hazard curve.
    hazard_curve = [(0.001, 0.0398612669790014),
                    (0.01, 0.039861266979001400),
                    (0.05, 0.039728757480298900),
                    (0.10, 0.029613426625612500),
                    (0.15, 0.019827328756491600),
                    (0.20, 0.013062270161451900),
                    (0.25, 0.008655387950000430),
                    (0.30, 0.005898520593689670),
                    (0.35, 0.004061698589511780),
                    (0.40, 0.002811727179526820),
                    (0.45, 0.001995117417776690),
                    (0.50, 0.001358705972845710),
                    (0.55, 0.000989667841573727),
                    (0.60, 0.000757544444296432),
                    (0.70, 0.000272824002045979),
                    (0.80, 0.00), (0.9, 0.00), (1.0, 0.00)]

    models.HazardCurveData.objects.create(
        hazard_curve=models.HazardCurve.objects.create(
            output=models.Output.objects.create_output(
                job, "Test Hazard curve", "hazard_curve"),
            investigation_time=50,
            imt="PGA", imls=[hz[0] for hz in hazard_curve],
            statistics="mean"),
        poes=[hz[1] for hz in hazard_curve],
        location="POINT(1 1)")

    return job
def test_export_for_scenario(self):
    """Run a scenario hazard calc in-process and check that its single
    GMF output exports to one valid, non-empty NRML file with 20 gmf
    elements.

    NOTE(review): here `check_export` returns a list of files, whereas
    the sibling variant of this test treats it as a single path --
    presumably two API revisions; confirm which matches `check_export`.
    """
    target_dir = tempfile.mkdtemp()

    try:
        cfg = helpers.get_data_path('scenario_hazard/job.ini')

        # run the calculation in process to create something to export
        os.environ['OQ_NO_DISTRIBUTE'] = '1'
        try:
            helpers.run_hazard_job(cfg)
        finally:
            del os.environ['OQ_NO_DISTRIBUTE']
        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')

        outputs = export_core.get_outputs(job.id)
        self.assertEqual(1, len(outputs))  # 1 GMF

        gmf_outputs = outputs.filter(output_type='gmf_scenario')
        self.assertEqual(1, len(gmf_outputs))

        exported_files = check_export(gmf_outputs[0].id, target_dir)

        self.assertEqual(1, len(exported_files))

        # Check the file paths exist, is absolute, and the file isn't
        # empty.
        f = exported_files[0]
        self._test_exported_file(f)

        # Check for the correct number of GMFs in the file:
        tree = etree.parse(f)
        self.assertEqual(20, number_of('nrml:gmf', tree))
    finally:
        shutil.rmtree(target_dir)
def test_initialize_site_model(self):
    """initialize_site_model() stores the site model records and
    computes one HazardSite per point of the calculation geometry."""
    # we need a slightly different config file for this test
    cfg = helpers.get_data_path(
        'simple_fault_demo_hazard/job_with_site_model.ini')
    self.job = helpers.get_hazard_job(cfg)
    self.calc = core.ClassicalHazardCalculator(self.job)

    self.calc.initialize_site_model()
    # If the site model isn't valid for the calculation geometry, a
    # `RuntimeError` should be raised here

    # Okay, it's all good. Now check the count of the site model records.
    sm_nodes = models.SiteModel.objects.filter(job=self.job)

    self.assertEqual(2601, len(sm_nodes))

    num_pts_to_compute = len(
        self.job.hazard_calculation.points_to_compute())

    hazard_site = models.HazardSite.objects.filter(
        hazard_calculation=self.job.hazard_calculation)

    # The site model is good. Now test that `hazard_site` was computed.
    # For now, just test the length.
    self.assertEqual(num_pts_to_compute, len(hazard_site))
def test_validate_warns(self):
    """validate() warns about parameters irrelevant to the calc mode."""
    # Test that `validate` raises warnings if unnecessary parameters are
    # specified for a given calculation.
    # For example, `ses_per_logic_tree_path` is an event-based hazard
    # param; if this param is specified for a classical hazard job, a
    # warning should be raised.
    cfg_file = helpers.get_data_path('simple_fault_demo_hazard/job.ini')
    job = engine.prepare_job()
    params = engine.parse_config(open(cfg_file, 'r'))
    # Add a few superfluous parameters:
    params['ses_per_logic_tree_path'] = 5
    params['ground_motion_correlation_model'] = 'JB2009'
    calculation = engine.create_calculation(
        models.HazardCalculation, params)
    job.hazard_calculation = calculation
    job.save()

    with warnings.catch_warnings(record=True) as w:
        validation.validate(job, 'hazard', params, ['xml'])

    expected_warnings = [
        "Unknown parameter '%s' for calculation mode 'classical'."
        " Ignoring." % x for x in ('ses_per_logic_tree_path',
                                   'ground_motion_correlation_model')
    ]

    actual_warnings = [m.message.message for m in w]
    self.assertEqual(sorted(expected_warnings), sorted(actual_warnings))
def setUpClass(cls):
    """Create two logic-tree realizations, each with its own SES
    collection and GMF records, and stash the class-level fixtures
    (gmf collection, rupture tags, investigation time)."""
    cfg = helpers.get_data_path('event_based_hazard/job.ini')
    job = helpers.get_hazard_job(cfg)

    rlz1 = models.LtRealization.objects.create(
        hazard_calculation=job.hazard_calculation,
        ordinal=1, seed=1, weight=None,
        sm_lt_path="test_sm", gsim_lt_path="test_gsim",
        is_complete=False, total_items=1, completed_items=1)
    rlz2 = models.LtRealization.objects.create(
        hazard_calculation=job.hazard_calculation,
        ordinal=2, seed=1, weight=None,
        sm_lt_path="test_sm", gsim_lt_path="test_gsim",
        is_complete=False, total_items=1, completed_items=1)

    ses_coll1 = models.SESCollection.objects.create(
        output=models.Output.objects.create_output(
            job, "Test SES Collection 1", "ses"),
        lt_realization=rlz1)
    ses_coll2 = models.SESCollection.objects.create(
        output=models.Output.objects.create_output(
            job, "Test SES Collection 2", "ses"),
        lt_realization=rlz2)

    gmf_data1 = helpers.create_gmf_data_records(job, rlz1, ses_coll1)[0]
    # The second realization uses an explicit set of points.
    points = [(15.3, 38.22), (15.7, 37.22), (15.4, 38.09),
              (15.56, 38.1), (15.2, 38.2)]
    gmf_data2 = helpers.create_gmf_data_records(
        job, rlz2, ses_coll2, points)[0]

    cls.gmf_coll1 = gmf_data1.gmf
    cls.ruptures1 = tuple(get_tags(gmf_data1))
    cls.ruptures2 = tuple(get_tags(gmf_data2))
    cls.investigation_time = job.hazard_calculation.investigation_time
def test_http_handler_writes_a_file(self):
    """HTTPHandler.handle() writes the fetched content to the target
    path; the HTTP connection is stubbed to read a local file."""

    class StubbedHTTPConnection(StubbedGetter):
        # Minimal stand-in for httplib's connection/response objects:
        # every method returns self so the handler's call chain works.

        def __enter__(self):
            return self

        def __exit__(self, *args):
            pass

        def request(self, req_type, path):
            # Remember the requested path so read() can serve it.
            self.remote_path = path
            return self

        def getresponse(self):
            return self

        def read(self):
            # Serve the "remote" content from the local data file.
            with open(self.remote_path, "r") as reader:
                return reader.read()

    expected_path = "/tmp/fake_file"
    remote_path = "http://localhost/%s" % helpers.get_data_path(
        "config.gem")
    url = urlparse.urlparse(remote_path)
    http_handler = handlers.HTTPHandler(url, expected_path)
    guaranteed_file = http_handler.handle(getter=StubbedHTTPConnection)
    self.assertTrue(os.path.isfile(guaranteed_file))
    os.unlink(guaranteed_file)
def setUp(self):
    """Prepare a job plus its profile, patching AMQP log initialisation
    so no broker connection is attempted during the tests."""
    self.job_from_file = engine._job_from_file
    self.init_logs_amqp_send = patch('openquake.logs.init_logs_amqp_send')
    self.init_logs_amqp_send.start()
    # NOTE(review): the patcher is started here but not stopped in this
    # block -- presumably a tearDown elsewhere calls .stop(); verify.
    self.job = engine.prepare_job()
    self.job_profile, self.params, self.sections = (
        engine.import_job_profile(helpers.get_data_path(CONFIG_FILE),
                                  self.job))
def setUp(self):
    """Read one line of serialized GMF JSON and build the target grid."""
    # Fix: close the fixture file via `with` instead of leaking the
    # handle returned by a bare open(...).readline().
    with open(helpers.get_data_path("gmfs.json")) as fh:
        self.gmf_string = fh.readline()
    region = shapes.Region.from_coordinates([(-118.30, 34.12),
                                             (-118.18, 34.12),
                                             (-118.18, 34.00),
                                             (-118.30, 34.00)])
    region.cell_size = 0.02

    self.grid = region.grid
class HazardCurveGetterPerAssetTestCase(unittest.TestCase):
    """Exercise HazardCurveGetterPerAsset against a fake classical risk
    job; subclasses override the class attributes below to test other
    getters/output types."""

    # Config fixtures and getter parameters; overridden by subclasses.
    hazard_demo = get_data_path('simple_fault_demo_hazard/job.ini')
    risk_demo = get_data_path('classical_psha_based_risk/job.ini')
    hazard_output_type = 'curve'
    getter_class = hazard_getters.HazardCurveGetterPerAsset
    taxonomy = 'VF'

    def setUp(self):
        """Create the fake risk job, parse the exposure and build the
        getter over the job's assets."""
        self.job, _ = helpers.get_fake_risk_job(
            self.risk_demo, self.hazard_demo, self.hazard_output_type)

        # need to run pre-execute to parse exposure model
        calc = RiskCalculator(self.job)
        models.JobStats.objects.create(oq_job=self.job)
        calc.pre_execute()

        self._assets = models.ExposureData.objects.filter(
            exposure_model=self.job.risk_calculation.exposure_model
            ).order_by('asset_ref')

        self.getter = self.getter_class(self.ho(), self.assets(), 500, "PGA")

    def test_is_pickleable(self):
        """The getter must survive pickling (it is shipped to workers)."""
        pickle.dumps(self.getter)  # raises an error if not

    def ho(self):
        """Return the job's hazard output wrapped in a list."""
        return [self.job.risk_calculation.hazard_output]

    def test_call(self):
        """Calling the getter yields the assets and their curves."""
        _hid, assets, values = self.getter().next()
        self.assertEqual([a.id for a in self.assets()],
                         [a.id for a in assets])
        numpy.testing.assert_allclose(
            [[(0.1, 0.1), (0.2, 0.2), (0.3, 0.3)],
             [(0.1, 0.1), (0.2, 0.2), (0.3, 0.3)],
             [(0.1, 0.1), (0.2, 0.2), (0.3, 0.3)]], values)

    def assets(self):
        """Return the exposure assets restricted to self.taxonomy."""
        return self._assets.filter(taxonomy=self.taxonomy)

    def test_filter(self):
        """A tiny max_distance filters out every asset and curve."""
        self.getter.max_distance = 0.00001  # 1 cm
        _hid, assets, curves = self.getter().next()
        self.assertEqual([], curves)
        self.assertEqual([], assets)
def test_store_site_model(self):
    """store_site_model() persists every record from site_model.xml and
    returns the ids of the newly inserted rows, in order."""
    # Setup
    site_model = helpers.get_data_path('site_model.xml')

    exp_site_model = [
        dict(lon=-122.5, lat=37.5, vs30=800.0, vs30_type="measured",
             z1pt0=100.0, z2pt5=5.0),
        dict(lon=-122.6, lat=37.6, vs30=801.0, vs30_type="measured",
             z1pt0=101.0, z2pt5=5.1),
        dict(lon=-122.7, lat=37.7, vs30=802.0, vs30_type="measured",
             z1pt0=102.0, z2pt5=5.2),
        dict(lon=-122.8, lat=37.8, vs30=803.0, vs30_type="measured",
             z1pt0=103.0, z2pt5=5.3),
        dict(lon=-122.9, lat=37.9, vs30=804.0, vs30_type="measured",
             z1pt0=104.0, z2pt5=5.4),
    ]

    job = models.OqJob.objects.create(user_name="openquake")
    ids = general.store_site_model(job, site_model)

    actual_site_model = models.SiteModel.objects.filter(
        job=job).order_by('id')

    for i, exp in enumerate(exp_site_model):
        act = actual_site_model[i]

        self.assertAlmostEqual(exp['lon'], act.location.x)
        self.assertAlmostEqual(exp['lat'], act.location.y)
        self.assertAlmostEqual(exp['vs30'], act.vs30)
        self.assertEqual(exp['vs30_type'], act.vs30_type)
        self.assertAlmostEqual(exp['z1pt0'], act.z1pt0)
        self.assertAlmostEqual(exp['z2pt5'], act.z2pt5)

    # last, check that the `store_site_model` function returns all of the
    # newly-inserted records
    for i, s in enumerate(ids):
        self.assertEqual(s, actual_site_model[i].id)
def test_file_handler_writes_a_file(self):
    """FileHandler.handle() materialises the source file at the target
    path; the written file is removed afterwards."""
    target = "/tmp/fake_file"
    source_url = urlparse.urlparse(helpers.get_data_path("config.gem"))
    handler = handlers.FileHandler(source_url, target)
    written_path = handler.handle()
    self.assertTrue(os.path.isfile(written_path))
    os.unlink(written_path)
def setUpClass(self):
    """Create one SES with a fault-sourced and a point-sourced rupture,
    plus the mesh / planar-surface coordinate fixtures the tests use.

    NOTE(review): the parameter is named `self` although this is a
    setUpClass hook -- presumably invoked as a classmethod (decorator
    not visible here); confirm.
    """
    cfg = helpers.get_data_path('simple_fault_demo_hazard/job.ini')
    job = helpers.get_hazard_job(cfg)

    lt_rlz = models.LtRealization.objects.create(
        hazard_calculation=job.hazard_calculation, ordinal=0, seed=0,
        sm_lt_path='foo', gsim_lt_path='bar', total_items=0)
    output = models.Output.objects.create(
        oq_job=job, display_name='test', output_type='ses')
    ses_coll = models.SESCollection.objects.create(
        output=output, lt_realization=lt_rlz)
    ses = models.SES.objects.create(
        ses_collection=ses_coll, investigation_time=50.0, ordinal=1)

    # 4x4 coordinate meshes for the fault-sourced rupture surface.
    self.mesh_lons = numpy.array(
        [0.1 * x for x in range(16)]).reshape((4, 4))
    self.mesh_lats = numpy.array(
        [0.2 * x for x in range(16)]).reshape((4, 4))
    self.mesh_depths = numpy.array(
        [0.3 * x for x in range(16)]).reshape((4, 4))

    # planar surface coords
    self.ps_lons = [1, 3, 5, 7]
    self.ps_lats = [2, 4, 6, 8]
    self.ps_depths = [0.1, 0.2, 0.3, 0.4]

    self.fault_rupture = models.SESRupture.objects.create(
        ses=ses, old_magnitude=5, old_strike=0, old_dip=0, old_rake=0,
        old_tectonic_region_type='Active Shallow Crust',
        old_is_from_fault_source=True, old_lons=self.mesh_lons,
        old_is_multi_surface=False,
        old_lats=self.mesh_lats, old_depths=self.mesh_depths)
    # NOTE(review): this one uses `magnitude` where the fault rupture
    # above uses `old_magnitude` -- looks intentional but verify.
    self.source_rupture = models.SESRupture.objects.create(
        ses=ses, magnitude=5, old_strike=0, old_dip=0, old_rake=0,
        old_tectonic_region_type='Active Shallow Crust',
        old_is_from_fault_source=False, old_lons=self.ps_lons,
        old_is_multi_surface=False,
        old_lats=self.ps_lats, old_depths=self.ps_depths)
class GroundMotionValuesGetterTestCase(HazardCurveGetterPerAssetTestCase):
    """Exercise GroundMotionValuesGetter over an event-based job; the
    setup/fixtures are inherited from the base test case."""

    # Config fixtures and getter parameters (override the base class).
    hazard_demo = get_data_path('event_based_hazard/job.ini')
    risk_demo = get_data_path('event_based_risk/job.ini')
    hazard_output_type = 'gmf'
    getter_class = hazard_getters.GroundMotionValuesGetter
    taxonomy = 'RM'

    def test_call(self):
        """The getter yields (gmfs, ruptures); each gmvs row matches the
        fixture values."""
        _hid, assets, (gmfs, _ruptures) = self.getter().next()
        for gmvs in gmfs:
            numpy.testing.assert_allclose([0.1, 0.2, 0.3], gmvs)

    def test_filter(self):
        """A tiny max_distance filters out all assets, gmvs, ruptures."""
        self.getter.max_distance = 0.00001  # 1 cm
        _hid, assets, (gmvs, ruptures) = self.getter().next()
        self.assertEqual([], gmvs)
        self.assertEqual([], ruptures)
        self.assertEqual([], assets)
class GroundMotionScenarioGetterTestCase(HazardCurveGetterPerAssetTestCase):
    """Exercise GroundMotionValuesGetter over a scenario job; the
    setup/fixtures are inherited from the base test case."""

    # Config fixtures and getter parameters (override the base class).
    hazard_demo = get_data_path('scenario_hazard/job.ini')
    risk_demo = get_data_path('scenario_risk/job.ini')
    hazard_output_type = 'gmf_scenario'
    getter_class = hazard_getters.GroundMotionValuesGetter
    taxonomy = 'RM'

    def test_call(self):
        """The getter yields a single item; each gmvs row matches the
        fixture values."""
        hazard = list(self.getter())
        self.assertEqual(1, len(hazard))
        _hid, _assets, gmfs = hazard[0]
        for gmvs in gmfs:
            numpy.testing.assert_allclose([0.1, 0.2, 0.3], gmvs)

    def test_filter(self):
        """A tiny max_distance leaves no assets/data for the scenario."""
        self.getter.max_distance = 0.00001  # 1 cm
        _hid, _assets, data = self.getter().next()
        self.assertEqual([], data[0])  # no assets
def test_is_job_completed(self):
    """is_job_completed() is True exactly for terminal job statuses."""
    job_id = engine._job_from_file(
        helpers.get_data_path(CONFIG_FILE), 'db').job_id
    row = models.OqJob.objects.get(id=job_id)
    expectations = (
        ('pending', False),
        ('running', False),
        ('succeeded', True),
        ('failed', True),
    )
    for status, expected in expectations:
        row.status = status
        row.save()
        self.assertEqual(expected, JobContext.is_job_completed(job_id))
def setUp(self): client = kvs.get_client() # Delete managed job id info so we can predict the job key # which will be allocated for us client.delete(kvs.tokens.CURRENT_JOBS) self.generated_files = [] job = engine.prepare_job() jp, params, sections = import_job_profile(helpers.get_data_path( CONFIG_FILE), job) self.job_ctxt = JobContext( params, job.id, sections=sections, oq_job_profile=jp, oq_job=job) job = engine.prepare_job() jp, params, sections = import_job_profile(helpers.get_data_path( CONFIG_WITH_INCLUDES), job) self.job_ctxt_with_includes = JobContext( params, job.id, sections=sections, oq_job_profile=jp, oq_job=job)
def setUp(self):
    """Build a classical risk job, parse its exposure via pre_execute,
    and add two extra assets (one near, one on the far side of the
    dateline) sharing the same fake taxonomy."""
    self.job, _ = helpers.get_fake_risk_job(
        get_data_path('classical_psha_based_risk/job.ini'),
        get_data_path('simple_fault_demo_hazard/job.ini'))
    calculator = base.RiskCalculator(self.job)
    models.JobStats.objects.create(oq_job=self.job)
    calculator.pre_execute()

    self.rc = self.job.risk_calculation

    common_fake_args = dict(
        exposure_model=self.rc.exposure_model,
        taxonomy="test")

    asset = models.ExposureData(site=Point(0.5, 0.5),
                                asset_ref="test1",
                                **common_fake_args)
    asset.save()

    asset = models.ExposureData(site=Point(179.1, 0),
                                asset_ref="test2",
                                **common_fake_args)
    asset.save()
def setUp(self):
    """Create a hazard job owned by a masked username plus a random
    number (1-10) of logic-tree realizations on its calculation."""
    cfg = helpers.get_data_path('simple_fault_demo_hazard/job.ini')
    self.job = helpers.get_hazard_job(cfg, username="******")

    for i in range(0, random.randint(1, 10)):
        # Fix: use 1.0 / (i + 1) -- the original `1 / (i + 1)` is
        # integer division under Python 2 and truncates every weight
        # after the first to 0.
        models.LtRealization(
            hazard_calculation=self.job.hazard_calculation,
            ordinal=i, seed=None, weight=1.0 / (i + 1),
            sm_lt_path=[i], gsim_lt_path=[i],
            total_items=0, completed_items=0).save()
def test(self):
    """pre_execute() derives points-to-compute and IMLs from the risk
    models when they are provided in the config."""
    # check that if risk models are provided, then the ``points to
    # compute`` and the imls are got from there
    username = helpers.default_user()

    job = engine.prepare_job(username)

    cfg = helpers.get_data_path('classical_job-sd-imt.ini')
    params = engine.parse_config(open(cfg, 'r'))

    haz_calc = engine.create_calculation(models.HazardCalculation, params)
    haz_calc = models.HazardCalculation.objects.get(id=haz_calc.id)
    job.hazard_calculation = haz_calc
    job.is_running = True
    job.save()

    # Patch out the heavyweight initialization steps so that only the
    # risk-model-driven part of pre_execute is exercised.
    base_path = ('openquake.engine.calculators.hazard.classical.core'
                 '.ClassicalHazardCalculator')
    init_src_patch = helpers.patch(
        '%s.%s' % (base_path, 'initialize_sources'))
    init_sm_patch = helpers.patch(
        '%s.%s' % (base_path, 'initialize_site_model'))
    init_rlz_patch = helpers.patch(
        '%s.%s' % (base_path, 'initialize_realizations'))
    record_stats_patch = helpers.patch(
        '%s.%s' % (base_path, 'record_init_stats'))
    init_pr_data_patch = helpers.patch(
        '%s.%s' % (base_path, 'initialize_pr_data'))
    patches = (init_src_patch, init_sm_patch, init_rlz_patch,
               record_stats_patch, init_pr_data_patch)

    mocks = [p.start() for p in patches]

    get_calculator_class(
        'hazard',
        job.hazard_calculation.calculation_mode)(job).pre_execute()

    self.assertEqual([(1.0, -1.0), (0.0, 0.0)],
                     [(point.latitude, point.longitude)
                      for point in haz_calc.points_to_compute()])
    self.assertEqual(['PGA'], haz_calc.get_imts())

    self.assertEqual(
        3, haz_calc.oqjob.exposuremodel.exposuredata_set.count())

    # NOTE(review): `m` is the mock returned by p.start(), so m.stop()
    # is just a recorded mock call; patches[i].stop() does the real
    # unpatching -- verify the m.stop() call is intentional.
    for i, m in enumerate(mocks):
        m.stop()
        patches[i].stop()

    return job
def test_get_status_from_db(self):
    """get_status_from_db() reflects the status column of the job row."""
    self.job = engine._job_from_file(
        helpers.get_data_path(CONFIG_FILE), 'db')
    row = models.OqJob.objects.get(id=self.job.job_id)

    row.status = "failed"
    row.save()
    self.assertEqual(
        "failed", JobContext.get_status_from_db(self.job.job_id))

    row.status = "running"
    row.save()
    self.assertEqual(
        "running", JobContext.get_status_from_db(self.job.job_id))