def test_dmg_per_asset_node(self):
    """Serialize a per-asset damage distribution and compare it against
    the expected NRML file."""
    site1 = writers.Site(-116., 41.)
    site2 = writers.Site(-117., 42.)
    asset1 = writers.ExposureData('asset_1', site1)
    asset2 = writers.ExposureData('asset_2', site2)
    asset3 = writers.ExposureData('asset_3', site2)
    # (asset, damage state, mean, stddev) — five damage states per asset
    rows = [
        (asset1, NO_DAMAGE, 1.0, 1.6),
        (asset1, SLIGHT, 34.8, 18.3),
        (asset1, MODERATE, 64.2, 19.8),
        (asset1, EXTENSIVE, 64.3, 19.7),
        (asset1, COMPLETE, 64.3, 19.7),
        (asset2, NO_DAMAGE, 1.0, 1.6),
        (asset2, SLIGHT, 34.8, 18.3),
        (asset2, MODERATE, 64.2, 19.8),
        (asset2, EXTENSIVE, 64.3, 19.7),
        (asset2, COMPLETE, 64.3, 19.7),
        (asset3, NO_DAMAGE, 1.1, 1.7),
        (asset3, SLIGHT, 34.9, 18.4),
        (asset3, MODERATE, 64.2, 19.8),
        (asset3, EXTENSIVE, 64.3, 19.7),
        (asset3, COMPLETE, 64.3, 19.7),
    ]
    data = itertools.starmap(DMG_DIST_PER_ASSET, rows)
    check_equal(__file__, 'expected_dmg_per_asset.xml',
                dw.to_nrml('dmg_dist_per_asset', data))
def test(self):
    """Run the hazard job, check the PoEs of the single computed curve
    (to 2 decimals) and compare its XML export with the expected file.

    NOTE: the original wrapped the body in ``try: ... except: raise
    else: shutil.rmtree(...)``.  That construct is a no-op wrapper: on
    failure the exception propagates either way, and the temp dir is
    removed only on success in both forms — so the redundant bare
    ``except`` has been dropped.  On failure the directory is kept,
    presumably for debugging.
    """
    result_dir = tempfile.mkdtemp()
    aaae = numpy.testing.assert_array_almost_equal
    cfg = os.path.join(os.path.dirname(__file__), 'job.ini')
    expected_curve_poes = [0.75421006, 0.08098179, 0.00686616]

    job = self.run_hazard(cfg)

    # Test the poe values of the single curve:
    [curve] = models.HazardCurveData.objects.filter(
        hazard_curve__output__oq_job=job.id)
    aaae(expected_curve_poes, curve.poes, decimal=2)

    # Test the exports as well:
    exported_file = hazard_export.export(
        curve.hazard_curve.output.id, result_dir)
    check_equal(__file__, 'expected_hazard_curves.xml', exported_file)

    # Clean up only when everything above passed.
    shutil.rmtree(result_dir)
def test(self):
    """Run the hazard job, check the PoEs of the single computed curve
    (to 3 decimals) and compare its XML export with the expected file.

    NOTE: the original ``try: ... except: raise else: rmtree`` wrapper
    was a no-op (exception propagates either way; cleanup happens only
    on success in both forms), so the redundant bare ``except`` has
    been removed.  On failure the temp dir is kept for inspection.
    """
    result_dir = tempfile.mkdtemp()
    cfg = os.path.join(os.path.dirname(__file__), 'job.ini')
    expected_curve_poes = [0.632120, 0.54811, 0.15241]

    job = self.run_hazard(cfg)

    # Test the poe values of the single curve:
    [actual_curve] = models.HazardCurveData.objects.filter(
        hazard_curve__output__oq_job=job.id)
    numpy.testing.assert_array_almost_equal(
        expected_curve_poes, actual_curve.poes, decimal=3)

    # Test the export as well:
    exported_file = hazard_export.export(
        actual_curve.hazard_curve.output.id, result_dir)
    check_equal(__file__, 'expected_hazard_curves.xml', exported_file)

    # Clean up only when everything above passed.
    shutil.rmtree(result_dir)
def test_serialize(self):
    """Serialize a GMF collection (3 sets x 2 fields x 2 nodes = 12
    nodes) and compare the result with the expected XML."""
    locations = [Location(i * 0.1, i * 0.1) for i in range(12)]
    gmf_nodes = [GmfNode(i * 0.2, loc)
                 for i, loc in enumerate(locations)]
    # Three SA fields and three PGA fields, two nodes each:
    gmfs = [
        Gmf('SA', 0.1, 5.0, gmf_nodes[0:2], 'i=1'),
        Gmf('SA', 0.2, 5.0, gmf_nodes[2:4], 'i=2'),
        Gmf('SA', 0.3, 5.0, gmf_nodes[4:6], 'i=3'),
        Gmf('PGA', None, None, gmf_nodes[6:8], 'i=4'),
        Gmf('PGA', None, None, gmf_nodes[8:10], 'i=5'),
        Gmf('PGA', None, None, gmf_nodes[10:], 'i=6'),
    ]
    gmf_sets = [
        GmfSet(gmfs[0:2], 50.0, 1),
        GmfSet(gmfs[2:4], 40.0, 2),
        GmfSet(gmfs[4:], 30.0, 3),
    ]
    gmf_collection = GmfCollection(gmf_sets)

    sm_lt_path = 'b1_b2_b3'
    gsim_lt_path = 'b1_b7_b15'
    writer = writers.EventBasedGMFXMLWriter(
        path, sm_lt_path, gsim_lt_path)
    writer.serialize(gmf_collection)
    check_equal(__file__, 'expected_gmf.xml', path)
def test_serialize_xml(self):
    """Serialize a hazard map with logic-tree metadata and compare it
    with the expected XML."""
    metadata = dict(
        investigation_time=50.0,
        imt='SA',
        poe=0.1,
        sa_period=0.025,
        sa_damping=5.0,
        smlt_path='b1_b2_b4',
        gsimlt_path='b1_b4_b5',
    )
    writer = writers.HazardMapXMLWriter(path, **metadata)
    writer.serialize(self.data)
    check_equal(__file__, 'expected_hazard_map.xml', path)
def test_export(self):
    """Run the scenario job and compare the exported GMF XML with the
    expected file."""
    result_dir = tempfile.mkdtemp()
    cfg = os.path.join(os.path.dirname(__file__), 'job.ini')
    job = self.run_hazard(cfg)
    [output] = export.core.get_outputs(job.id, 'gmf_scenario')
    exported_file = export.core.export(output.id, result_dir)
    check_equal(__file__, 'expected.xml', exported_file)
    shutil.rmtree(result_dir)
def test_export(self):
    """Run the scenario job (config taken from case_1) and compare the
    exported GMF XML with case_1's expected file."""
    result_dir = tempfile.mkdtemp()
    cfg = os.path.join(os.path.dirname(case_1.__file__), 'job.ini')
    job = self.run_hazard(cfg)
    [output] = export.core.get_outputs(job.id, 'gmf_scenario')
    exported_file = export.core.export(output.id, result_dir)
    check_equal(case_1.__file__, 'expected.xml', exported_file)
    shutil.rmtree(result_dir)
def test_serialize_quantile_xml(self):
    """Serialize a quantile hazard map (statistics metadata instead of
    logic-tree paths) and compare it with the expected XML."""
    metadata = dict(
        investigation_time=50.0,
        imt='SA',
        poe=0.1,
        sa_period=0.025,
        sa_damping=5.0,
        statistics='quantile',
        quantile_value=0.85,
    )
    writer = writers.HazardMapXMLWriter(path, **metadata)
    writer.serialize(self.data)
    check_equal(__file__, 'expected_quantile.xml', path)
def test_dmg_total_node(self):
    """Serialize a total damage distribution and compare it with the
    expected NRML file."""
    # (damage state, mean, stddev) rows:
    rows = [
        (NO_DAMAGE, 1.0, 1.6),
        (SLIGHT, 34.8, 18.3),
        (MODERATE, 64.2, 19.8),
        (EXTENSIVE, 64.3, 19.7),
        (COMPLETE, 64.3, 19.7),
    ]
    data = itertools.starmap(DMG_DIST_TOTAL, rows)
    check_equal(__file__, 'expected_dmg_total.xml',
                dw.to_nrml('dmg_dist_total', data))
def test_serialize(self):
    """Basic serialization test: write hazard curves with SA metadata
    and compare with the expected XML."""
    metadata = dict(
        investigation_time=self.TIME,
        imt='SA',
        imls=self.IMLS,
        sa_period=0.025,
        sa_damping=5.0,
        smlt_path='b1_b2_b4',
        gsimlt_path='b1_b4_b5',
    )
    writer = writers.HazardCurveXMLWriter(path, **metadata)
    writer.serialize(self.data)
    check_equal(__file__, 'expected_hazard_curves.xml', path)
def test_serialize_quantile(self):
    """Test serialization of quantile curves (statistics metadata
    instead of logic-tree paths)."""
    metadata = dict(
        investigation_time=self.TIME,
        imt='SA',
        imls=self.IMLS,
        sa_period=0.025,
        sa_damping=5.0,
        statistics='quantile',
        quantile_value=0.15,
    )
    writer = writers.HazardCurveXMLWriter(path, **metadata)
    writer.serialize(self.data)
    check_equal(__file__, 'expected_quantile_curves.xml', path)
def compare_xml_outputs(self, job, expected_fnames):
    """Export every XML output of `job` and compare each against the
    matching expected file.

    Matching is done by filename stem: an exported file matches an
    expected name when its basename starts with the expected basename
    minus its extension.  The original stripped the extension with
    ``[:-4]``, which silently breaks for extensions that are not three
    characters; ``os.path.splitext`` handles any extension.

    When ``self.OVERWRITE_EXPECTED`` is set, the exported file replaces
    the expected fixture instead of being compared.
    """
    result_dir = tempfile.mkdtemp()
    for output in self.actual_xml_outputs(job):
        exported_file = export.core.export(output.id, result_dir)
        actual = os.path.basename(exported_file)
        for expected in expected_fnames:
            # Strip the extension robustly (was: basename(expected)[:-4]).
            stem = os.path.splitext(os.path.basename(expected))[0]
            if actual.startswith(stem):
                if self.OVERWRITE_EXPECTED:
                    shutil.copy(exported_file, self._test_path(expected))
                else:
                    check_equal(
                        self.module.__file__, expected, exported_file)
    shutil.rmtree(result_dir)
def test_get_stat_curves_maps(self):
    """Write the statistical curves and maps to CSV and compare each
    against its expected fixture."""
    tempdir = tempfile.mkdtemp()
    curves, maps = self.builder.get_curves_maps(self.stats)
    # expecting arrays of shape (Q1, N) with Q1=3, N=4
    curves_csv = os.path.join(tempdir, 'expected_loss_curves.csv')
    writers.write_csv(curves_csv, curves, fmt='%05.2f')
    tests.check_equal(__file__, 'expected_loss_curves.csv', curves_csv)

    maps_csv = os.path.join(tempdir, 'expected_loss_maps.csv')
    writers.write_csv(maps_csv, maps, fmt='%05.2f')
    tests.check_equal(__file__, 'expected_loss_maps.csv', maps_csv)

    # remove only if the test passes
    shutil.rmtree(tempdir)
def test_dmg_per_taxonomy_node(self):
    """Serialize a per-taxonomy damage distribution and compare it with
    the expected NRML file."""
    # (taxonomy, damage state, mean, stddev) rows for two taxonomies:
    rows = [
        ('RC', NO_DAMAGE, 1.0, 1.6),
        ('RC', SLIGHT, 34.8, 18.3),
        ('RC', MODERATE, 64.2, 19.8),
        ('RC', EXTENSIVE, 64.3, 19.7),
        ('RC', COMPLETE, 64.3, 19.7),
        ('RM', NO_DAMAGE, 1.0, 1.6),
        ('RM', SLIGHT, 34.8, 18.3),
        ('RM', MODERATE, 64.2, 19.8),
        ('RM', EXTENSIVE, 64.3, 19.7),
        ('RM', COMPLETE, 64.3, 19.7),
    ]
    data = itertools.starmap(DMG_DIST_PER_TAXONOMY, rows)
    check_equal(__file__, 'expected_dmg_per_taxonomy.xml',
                dw.to_nrml('dmg_dist_per_taxonomy', data))
def test_get_stat_curves(self):
    """Write statistical loss curves and maps to CSV and compare them
    against the expected fixtures."""
    tempdir = tempfile.mkdtemp()
    # NOTE(review): ins_curves is unpacked but never checked here,
    # unlike the sibling test that also compares expected_ins_curves.csv
    # — confirm whether a fixture for it should exist in this case.
    curves, ins_curves, maps = scientific.get_stat_curves(self.stats)

    curves_csv = os.path.join(tempdir, 'expected_loss_curves.csv')
    writers.write_csv(curves_csv, curves, fmt='%05.2f')
    tests.check_equal(__file__, 'expected_loss_curves.csv', curves_csv)

    maps_csv = os.path.join(tempdir, 'expected_loss_maps.csv')
    writers.write_csv(maps_csv, maps, fmt='%05.2f')
    tests.check_equal(__file__, 'expected_loss_maps.csv', maps_csv)

    # remove only if the test passes
    shutil.rmtree(tempdir)
def test(self):
    """Run the job and compare the mean PGA and SA(0.2) curves, written
    as CSV, with case_13's expected fixtures."""
    result_dir = tempfile.mkdtemp()
    cfg = os.path.join(self.CURRENTDIR, 'job.ini')
    job = self.run_hazard(cfg)

    curves_PGA = get_mean_curves(job, 'PGA')
    actual = write_csv(os.path.join(result_dir, 'PGA.csv'), curves_PGA)
    check_equal(case_13.__file__, 'expected/mean-PGA.csv', actual)

    curves_SA = get_mean_curves(job, 'SA', 0.2)
    actual = write_csv(os.path.join(result_dir, 'SA.csv'), curves_SA)
    check_equal(case_13.__file__, 'expected/mean-SA.csv', actual)

    shutil.rmtree(result_dir)
def test_collapse_map_node(self):
    """Serialize a collapse map (four assets on two sites) and compare
    it with the expected NRML file."""
    site1 = writers.Site(-72.2, 18.)
    site2 = writers.Site(-72.25, 18.)
    # (asset, mean, stddev) rows — three assets on site1, one on site2:
    rows = [
        (writers.ExposureData('a1', site1), 1.0, 1.6),
        (writers.ExposureData('a2', site1), 34.8, 18.3),
        (writers.ExposureData('a3', site1), 64.2, 19.8),
        (writers.ExposureData('a4', site2), 64.3, 19.7),
    ]
    data = itertools.starmap(COLLAPSE_MAP, rows)
    check_equal(__file__, 'expected_collapse_map.xml',
                dw.to_nrml('collapse_map', data))
def test_serialize(self):
    """Basic multi-curve serialization test: two metadata sets (SA and
    PGA) written through one writer, compared with the expected XML."""
    metadata1 = dict(
        investigation_time=50,
        imt='SA',
        imls=[0.005, 0.007, 0.0098],
        sa_period=0.025,
        sa_damping=5.0,
        smlt_path='b1_b2_b4',
        gsimlt_path='b1_b4_b5',
    )
    metadata2 = dict(
        investigation_time=30,
        imt='PGA',
        imls=[0.05, 0.07, 0.8],
        smlt_path='b1_b2_b4',
        gsimlt_path='b1_b4_b5',
    )
    writer = writers.MultiHazardCurveXMLWriter(
        self.path, [metadata1, metadata2])
    writer.serialize([self.data1, self.data2])
    check_equal(__file__, 'expected_multicurves.xml', self.path)
def test_serialize(self):
    """Basic multi-curve serialization test: SA and PGA metadata (the
    PGA set carrying explicit None SA fields), compared with the
    expected XML."""
    metadata1 = dict(
        investigation_time=50,
        imt='SA',
        imls=[0.005, 0.007, 0.0098],
        sa_period=0.025,
        sa_damping=5.0,
        smlt_path='b1_b2_b4',
        gsimlt_path='b1_b4_b5',
    )
    metadata2 = dict(
        investigation_time=30,
        imt='PGA',
        imls=[0.05, 0.07, 0.8],
        sa_period=None,
        sa_damping=None,
        smlt_path='b1_b2_b4',
        gsimlt_path='b1_b4_b5',
    )
    writer = writers.MultiHazardCurveXMLWriter(
        path, [metadata1, metadata2])
    writer.serialize([self.data1, self.data2])
    check_equal(__file__, 'expected_multicurves.xml', path)
def test(self):
    """Run the hazard job, check the PoEs of the two logic-tree curves
    (b1_b2 and b1_b3) and compare their XML exports with the expected
    files.

    NOTE: the original ``try: ... except: raise else: rmtree`` wrapper
    was a no-op (exception propagates either way; cleanup happens only
    on success in both forms), so the redundant bare ``except`` has
    been removed.  On failure the temp dir is kept for inspection.
    """
    result_dir = tempfile.mkdtemp()
    cfg = os.path.join(os.path.dirname(__file__), 'job.ini')
    expected_curve_poes_b1_b2 = [0.00995, 0.00076, 9.7E-5, 0.0]
    expected_curve_poes_b1_b3 = [0.043, 0.0012, 7.394E-5, 0.0]

    job = self.run_hazard(cfg)

    # Test the poe values for the two curves:
    curve_b1_b2, curve_b1_b3 = models.HazardCurveData.objects\
        .filter(hazard_curve__output__oq_job=job.id)\
        .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path')

    # Sanity check, to make sure we have the curves ordered correctly:
    self.assertEqual(
        ['b1', 'b2'], curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
    self.assertEqual(
        ['b1', 'b3'], curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)

    numpy.testing.assert_array_almost_equal(
        expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=4)
    numpy.testing.assert_array_almost_equal(
        expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=4)

    # Test the exports as well:
    exported_file_b1_b2 = hazard_export.export(
        curve_b1_b2.hazard_curve.output.id, result_dir)
    check_equal(__file__, 'expected_b1_b2.xml', exported_file_b1_b2)

    exported_file_b1_b3 = hazard_export.export(
        curve_b1_b3.hazard_curve.output.id, result_dir)
    check_equal(__file__, 'expected_b1_b3.xml', exported_file_b1_b3)

    # Clean up only when everything above passed.
    shutil.rmtree(result_dir)
def test(self):
    """Run case_2's hazard job, check the single curve's PoEs and
    compare its XML export with the expected file."""
    result_dir = tempfile.mkdtemp()
    cfg = os.path.join(os.path.dirname(case_2.__file__), 'job.ini')
    expected_curve_poes = [0.0095, 0.00076, 0.000097, 0.0]

    job = self.run_hazard(cfg)

    # Check the PoE values of the single curve:
    [actual_curve] = models.HazardCurveData.objects.filter(
        hazard_curve__output__oq_job=job.id)
    numpy.testing.assert_array_almost_equal(
        expected_curve_poes, actual_curve.poes, decimal=3)

    # Check the export as well:
    exported_file = hazard_export.export(
        actual_curve.hazard_curve.output.id, result_dir)
    check_equal(case_2.__file__, 'expected_hazard_curves.xml',
                exported_file)

    shutil.rmtree(result_dir)
def test(self):
    """Run case_10's hazard job, check the PoEs of the two logic-tree
    curves and compare their XML exports with the expected files."""
    result_dir = tempfile.mkdtemp()
    cfg = os.path.join(os.path.dirname(case_10.__file__), 'job.ini')
    expected_curve_poes_b1_b2 = [0.00995, 0.00076, 9.7E-5, 0.0]
    expected_curve_poes_b1_b3 = [0.043, 0.0012, 7.394E-5, 0.0]

    job = self.run_hazard(cfg)

    # Check the PoE values for the two curves:
    curve_b1_b2, curve_b1_b3 = (
        models.HazardCurveData.objects
        .filter(hazard_curve__output__oq_job=job.id)
        .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path'))

    # Sanity check that the curves came back in the expected order:
    self.assertEqual(
        ['b1', 'b2'], curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
    self.assertEqual(
        ['b1', 'b3'], curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)

    numpy.testing.assert_array_almost_equal(
        expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=4)
    numpy.testing.assert_array_almost_equal(
        expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=4)

    # Check the exports as well:
    exported_file_b1_b2 = hazard_export.export(
        curve_b1_b2.hazard_curve.output.id, result_dir)
    check_equal(case_10.__file__, 'expected_b1_b2.xml',
                exported_file_b1_b2)

    exported_file_b1_b3 = hazard_export.export(
        curve_b1_b3.hazard_curve.output.id, result_dir)
    check_equal(case_10.__file__, 'expected_b1_b3.xml',
                exported_file_b1_b3)

    shutil.rmtree(result_dir)
def test(self):
    """Run case_12's hazard job, check the single curve's PoEs and
    compare its XML export with the expected file."""
    result_dir = tempfile.mkdtemp()
    aaae = numpy.testing.assert_array_almost_equal
    cfg = os.path.join(os.path.dirname(case_12.__file__), 'job.ini')
    expected_curve_poes = [0.75421006, 0.08098179, 0.00686616]

    job = self.run_hazard(cfg)

    # Check the PoE values of the single curve:
    [curve] = models.HazardCurveData.objects.filter(
        hazard_curve__output__oq_job=job.id)
    aaae(expected_curve_poes, curve.poes, decimal=2)

    # Check the export as well:
    exported_file = hazard_export.export(
        curve.hazard_curve.output.id, result_dir)
    check_equal(case_12.__file__, 'expected_hazard_curves.xml',
                exported_file)

    shutil.rmtree(result_dir)
def test(self):
    """Run case_6's hazard job, check the single curve's PoEs and
    compare its XML export with the expected file."""
    result_dir = tempfile.mkdtemp()
    cfg = os.path.join(os.path.dirname(case_6.__file__), 'job.ini')
    expected_curve_poes = [0.86466, 0.82460, 0.36525]

    job = self.run_hazard(cfg)

    # Check the PoE values of the single curve:
    [actual_curve] = models.HazardCurveData.objects.filter(
        hazard_curve__output__oq_job=job.id)
    numpy.testing.assert_array_almost_equal(
        expected_curve_poes, actual_curve.poes, decimal=2)

    # Check the export as well:
    exported_file = hazard_export.export(
        actual_curve.hazard_curve.output.id, result_dir)
    check_equal(case_6.__file__, 'expected_hazard_curves.xml',
                exported_file)

    shutil.rmtree(result_dir)
def test(self):
    """Run case_7's hazard job; check the b1 and b2 realization curves
    plus the mean curve, then compare all three XML exports with the
    expected files."""
    result_dir = tempfile.mkdtemp()
    cfg = os.path.join(os.path.dirname(case_7.__file__), 'job.ini')
    expected_curve_poes_b1 = [0.86466, 0.82460, 0.36525]
    expected_curve_poes_b2 = [0.63212, 0.61186, 0.25110]
    expected_mean_poes = [0.794898, 0.760778, 0.331005]

    job = self.run_hazard(cfg)

    # Check the PoE values for the two realization curves.
    actual_curve_b1, actual_curve_b2 = (
        models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id,
            hazard_curve__lt_realization__isnull=False).order_by(
            'hazard_curve__lt_realization__lt_model__sm_lt_path'))

    # Sanity check that the curves came back in the expected order:
    self.assertEqual(
        ['b1'], actual_curve_b1.hazard_curve.lt_realization.sm_lt_path)
    self.assertEqual(
        ['b2'], actual_curve_b2.hazard_curve.lt_realization.sm_lt_path)

    numpy.testing.assert_array_almost_equal(
        expected_curve_poes_b1, actual_curve_b1.poes, decimal=3)
    numpy.testing.assert_array_almost_equal(
        expected_curve_poes_b2, actual_curve_b2.poes, decimal=3)

    # Check the mean curve:
    [mean_curve] = models.HazardCurveData.objects\
        .filter(hazard_curve__output__oq_job=job.id,
                hazard_curve__statistics='mean')
    numpy.testing.assert_array_almost_equal(
        expected_mean_poes, mean_curve.poes, decimal=3)

    # Check the exports as well:
    exported_file_b1 = hazard_export.export(
        actual_curve_b1.hazard_curve.output.id, result_dir)
    check_equal(case_7.__file__, 'expected_b1.xml', exported_file_b1)
    exported_file_b2 = hazard_export.export(
        actual_curve_b2.hazard_curve.output.id, result_dir)
    check_equal(case_7.__file__, 'expected_b2.xml', exported_file_b2)
    # mean:
    exported_file_mean = hazard_export.export(
        mean_curve.hazard_curve.output.id, result_dir)
    check_equal(case_7.__file__, 'expected_mean.xml', exported_file_mean)

    shutil.rmtree(result_dir)
def test(self):
    """Run case_7's hazard job; check the b1 and b2 realization curves
    plus the mean curve, then compare all three XML exports with the
    expected files."""
    result_dir = tempfile.mkdtemp()
    cfg = os.path.join(os.path.dirname(case_7.__file__), 'job.ini')
    expected_curve_poes_b1 = [0.86466, 0.82460, 0.36525]
    expected_curve_poes_b2 = [0.63212, 0.61186, 0.25110]
    expected_mean_poes = [0.794898, 0.760778, 0.331005]

    job = self.run_hazard(cfg)

    # Check the PoE values for the two realization curves.
    actual_curve_b1, actual_curve_b2 = (
        models.HazardCurveData.objects
        .filter(hazard_curve__output__oq_job=job.id,
                hazard_curve__lt_realization__isnull=False)
        .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path'))

    # Sanity check that the curves came back in the expected order:
    self.assertEqual(
        ['b1'], actual_curve_b1.hazard_curve.lt_realization.sm_lt_path)
    self.assertEqual(
        ['b2'], actual_curve_b2.hazard_curve.lt_realization.sm_lt_path)

    numpy.testing.assert_array_almost_equal(
        expected_curve_poes_b1, actual_curve_b1.poes, decimal=3)
    numpy.testing.assert_array_almost_equal(
        expected_curve_poes_b2, actual_curve_b2.poes, decimal=3)

    # Check the mean curve:
    [mean_curve] = models.HazardCurveData.objects\
        .filter(hazard_curve__output__oq_job=job.id,
                hazard_curve__statistics='mean')
    numpy.testing.assert_array_almost_equal(
        expected_mean_poes, mean_curve.poes, decimal=3)

    # Check the exports as well:
    exported_file_b1 = hazard_export.export(
        actual_curve_b1.hazard_curve.output.id, result_dir)
    check_equal(case_7.__file__, 'expected_b1.xml', exported_file_b1)
    exported_file_b2 = hazard_export.export(
        actual_curve_b2.hazard_curve.output.id, result_dir)
    check_equal(case_7.__file__, 'expected_b2.xml', exported_file_b2)
    # mean:
    exported_file_mean = hazard_export.export(
        mean_curve.hazard_curve.output.id, result_dir)
    check_equal(case_7.__file__, 'expected_mean.xml', exported_file_mean)

    shutil.rmtree(result_dir)
def test_get_stat_curves(self):
    """Write statistical loss curves, insured-loss curves and loss maps
    to CSV in self.tempdir and compare each against its fixture."""
    curves, ins_curves, maps = scientific.get_stat_curves(self.stats)

    # Each result goes through the same write-then-compare cycle:
    curves_csv = os.path.join(self.tempdir, 'expected_loss_curves.csv')
    writers.write_csv(curves_csv, curves, fmt='%05.2f')
    tests.check_equal(__file__, 'expected_loss_curves.csv', curves_csv)

    ins_csv = os.path.join(self.tempdir, 'expected_ins_curves.csv')
    writers.write_csv(ins_csv, ins_curves, fmt='%05.2f')
    tests.check_equal(__file__, 'expected_ins_curves.csv', ins_csv)

    maps_csv = os.path.join(self.tempdir, 'expected_loss_maps.csv')
    writers.write_csv(maps_csv, maps, fmt='%05.2f')
    tests.check_equal(__file__, 'expected_loss_maps.csv', maps_csv)
def test(self):
    """Run case_8's hazard job, check the PoEs of the three logic-tree
    curves and compare their XML exports with the expected files."""
    result_dir = tempfile.mkdtemp()
    cfg = os.path.join(os.path.dirname(case_8.__file__), 'job.ini')
    expected_curve_poes_b1_b2 = [0.095163, 0.012362, 0.002262, 0.0]
    expected_curve_poes_b1_b3 = [0.009950, 0.00076, 9.99995E-6, 0.0]
    expected_curve_poes_b1_b4 = [0.0009995, 4.5489E-5, 4.07365E-6, 0.0]

    job = self.run_hazard(cfg)

    # Check the PoE values for the three curves:
    curve_b1_b2, curve_b1_b3, curve_b1_b4 = (
        models.HazardCurveData.objects
        .filter(hazard_curve__output__oq_job=job.id)
        .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path'))

    # Sanity check that the curves came back in the expected order:
    self.assertEqual(
        ['b1', 'b2'], curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
    self.assertEqual(
        ['b1', 'b3'], curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)
    self.assertEqual(
        ['b1', 'b4'], curve_b1_b4.hazard_curve.lt_realization.sm_lt_path)

    numpy.testing.assert_array_almost_equal(
        expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=3)
    numpy.testing.assert_array_almost_equal(
        expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=3)
    numpy.testing.assert_array_almost_equal(
        expected_curve_poes_b1_b4, curve_b1_b4.poes, decimal=3)

    # Check the exports as well:
    exported_file_b1_b2 = hazard_export.export(
        curve_b1_b2.hazard_curve.output.id, result_dir)
    check_equal(case_8.__file__, 'expected_b1_b2.xml',
                exported_file_b1_b2)
    exported_file_b1_b3 = hazard_export.export(
        curve_b1_b3.hazard_curve.output.id, result_dir)
    check_equal(case_8.__file__, 'expected_b1_b3.xml',
                exported_file_b1_b3)
    exported_file_b1_b4 = hazard_export.export(
        curve_b1_b4.hazard_curve.output.id, result_dir)
    check_equal(case_8.__file__, 'expected_b1_b4.xml',
                exported_file_b1_b4)

    shutil.rmtree(result_dir)
def test_serialize(self):
    """Serialize the disaggregation data and compare the output with
    the expected XML."""
    writer = writers.DisaggXMLWriter(path, **self.metadata)
    writer.serialize(self.data)
    check_equal(__file__, 'expected_disagg.xml', path)
def test(self):
    """Run case_11's hazard job; check the three realization curves,
    the mean curve and the 0.1/0.9 quantile curves, then compare all
    six XML exports with the expected files."""
    current = case_11.__file__
    result_dir = tempfile.mkdtemp()
    aaae = numpy.testing.assert_array_almost_equal
    cfg = os.path.join(os.path.dirname(current), 'job.ini')
    expected_curve_poes_b1_b2 = [0.0055, 0.00042, 5.77E-5, 0.0]
    expected_curve_poes_b1_b3 = [0.00995, 0.00076, 9.7E-5, 0.0]
    expected_curve_poes_b1_b4 = [0.018, 0.0013, 0.00014, 0.0]
    expected_mean_poes = [0.01067, 0.0008, 9.774E-5, 0.0]
    expected_q0_1_poes = [0.0055, 0.00042, 5.77E-5, 0.0]
    expected_q0_9_poes = [0.013975, 0.00103, 0.0001185, 0.0]

    job = self.run_hazard(cfg)

    # Check the PoE values for the realization curves:
    curve_b1_b2, curve_b1_b3, curve_b1_b4 = (
        models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id,
            hazard_curve__lt_realization__isnull=False).order_by(
            'hazard_curve__lt_realization__lt_model__sm_lt_path'))

    # Sanity check that the curves came back in the expected order:
    self.assertEqual(
        ['b1', 'b2'], curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
    self.assertEqual(
        ['b1', 'b3'], curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)
    self.assertEqual(
        ['b1', 'b4'], curve_b1_b4.hazard_curve.lt_realization.sm_lt_path)

    aaae(expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=4)
    aaae(expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=4)
    aaae(expected_curve_poes_b1_b4, curve_b1_b4.poes, decimal=4)

    # Check the mean curve:
    [mean_curve] = models.HazardCurveData.objects\
        .filter(hazard_curve__output__oq_job=job.id,
                hazard_curve__statistics='mean')
    aaae(expected_mean_poes, mean_curve.poes, decimal=4)

    # Check the quantile curves (ordered by quantile value):
    quantile_0_1_curve, quantile_0_9_curve = \
        models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id,
            hazard_curve__statistics='quantile'
        ).order_by('hazard_curve__quantile')
    aaae(expected_q0_1_poes, quantile_0_1_curve.poes, decimal=4)
    aaae(expected_q0_9_poes, quantile_0_9_curve.poes, decimal=4)

    # Check the exports as well:
    exported_file_b1_b2 = hazard_export.export(
        curve_b1_b2.hazard_curve.output.id, result_dir)
    check_equal(current, 'expected_b1_b2.xml', exported_file_b1_b2)
    exported_file_b1_b3 = hazard_export.export(
        curve_b1_b3.hazard_curve.output.id, result_dir)
    check_equal(current, 'expected_b1_b3.xml', exported_file_b1_b3)
    exported_file_b1_b4 = hazard_export.export(
        curve_b1_b4.hazard_curve.output.id, result_dir)
    check_equal(current, 'expected_b1_b4.xml', exported_file_b1_b4)
    exported_file_mean = hazard_export.export(
        mean_curve.hazard_curve.output.id, result_dir)
    check_equal(current, 'expected_mean.xml', exported_file_mean)
    q01_file = hazard_export.export(
        quantile_0_1_curve.hazard_curve.output.id, result_dir)
    check_equal(current, 'expected_quantile_0_1.xml', q01_file)
    q09_file = hazard_export.export(
        quantile_0_9_curve.hazard_curve.output.id, result_dir)
    check_equal(current, 'expected_quantile_0_9.xml', q09_file)

    shutil.rmtree(result_dir)
def test_serialize(self):
    """Serialize two stochastic event sets — mixing planar-surface,
    multi-surface and gridded ruptures — and compare the output with
    the expected XML.  Writes to a fresh temp file stored on
    ``self.path``."""
    # SES 1: one planar rupture plus one multi-surface rupture.
    pr1 = ProbabilisticRupture(
        1, 5.5, 1.0, 40.0, 10.0, 'Active Shallow Crust', False, False,
        top_left_corner=(1.1, 1.01, 10.0),
        top_right_corner=(2.1, 2.01, 20.0),
        bottom_right_corner=(3.1, 3.01, 30.0),
        bottom_left_corner=(4.1, 4.01, 40.0))
    pr2 = ProbabilisticRupture(
        2, 6.5, 0.0, 41.0, 0.0, 'Active Shallow Crust', True, False,
        lons=[[5.1, 6.1], [7.1, 8.1], ],
        lats=[[5.01, 6.01], [7.01, 8.01], ],
        depths=[[10.5, 10.6], [10.7, 10.8], ])
    ses1 = SES(1, 50.0, [SESRupture(pr1, 1), SESRupture(pr2, 1)])

    # SES 2: planar, multi-surface and gridded ruptures.
    pr3 = ProbabilisticRupture(
        3, 5.4, 2.0, 42.0, 12.0, 'Stable Shallow Crust', False, False,
        top_left_corner=(1.1, 1.01, 10.0),
        top_right_corner=(2.1, 2.01, 20.0),
        bottom_left_corner=(4.1, 4.01, 40.0),
        bottom_right_corner=(3.1, 3.01, 30.0))
    pr4 = ProbabilisticRupture(
        4, 6.4, 3.0, 43.0, 13.0, 'Stable Shallow Crust', True, False,
        lons=[[5.2, 6.2], [7.2, 8.2], ],
        lats=[[5.02, 6.02], [7.02, 8.02], ],
        depths=[[10.1, 10.2], [10.3, 10.4], ])
    pr5 = ProbabilisticRupture(
        5, 7.4, 4.0, 44.0, 14.0, 'Stable Shallow Crust', False, True,
        lons=[-1.0, 1.0, -1.0, 1.0, 0.0, 1.1, 0.9, 2.0],
        lats=[1.0, 1.0, -1.0, -1.0, 1.1, 2.0, 0.0, 0.9],
        depths=[21.0, 21.0, 59.0, 59.0, 20.0, 20.0, 80.0, 80.0])
    ses2 = SES(2, 40.0, [SESRupture(pr3, 1), SESRupture(pr4, 1),
                         SESRupture(pr5, 1)])

    sm_lt_path = 'b8_b9_b10'
    _, self.path = tempfile.mkstemp()
    writer = writers.SESXMLWriter(self.path, sm_lt_path)
    writer.serialize([ses1, ses2])
    check_equal(__file__, 'expected_ses_collection.xml', self.path)
def test_serialize(self):
    """Serialize two stochastic event sets — mixing planar-surface,
    multi-surface and gridded ruptures — to the module-level ``path``
    and compare the output with the expected XML."""
    # SES 1: one planar rupture plus one multi-surface rupture.
    pr1 = ProbabilisticRupture(
        1, 5.5, 1.0, 40.0, 10.0, 'Active Shallow Crust', False, False,
        top_left_corner=(1.1, 1.01, 10.0),
        top_right_corner=(2.1, 2.01, 20.0),
        bottom_right_corner=(3.1, 3.01, 30.0),
        bottom_left_corner=(4.1, 4.01, 40.0))
    pr2 = ProbabilisticRupture(
        2, 6.5, 0.0, 41.0, 0.0, 'Active Shallow Crust', True, False,
        lons=[[5.1, 6.1], [7.1, 8.1], ],
        lats=[[5.01, 6.01], [7.01, 8.01], ],
        depths=[[10.5, 10.6], [10.7, 10.8], ])
    ses1 = SES(1, 50.0, [SESRupture(pr1, 1), SESRupture(pr2, 1)])

    # SES 2: planar, multi-surface and gridded ruptures.
    pr3 = ProbabilisticRupture(
        3, 5.4, 2.0, 42.0, 12.0, 'Stable Shallow Crust', False, False,
        top_left_corner=(1.1, 1.01, 10.0),
        top_right_corner=(2.1, 2.01, 20.0),
        bottom_left_corner=(4.1, 4.01, 40.0),
        bottom_right_corner=(3.1, 3.01, 30.0))
    pr4 = ProbabilisticRupture(
        4, 6.4, 3.0, 43.0, 13.0, 'Stable Shallow Crust', True, False,
        lons=[[5.2, 6.2], [7.2, 8.2], ],
        lats=[[5.02, 6.02], [7.02, 8.02], ],
        depths=[[10.1, 10.2], [10.3, 10.4], ])
    pr5 = ProbabilisticRupture(
        5, 7.4, 4.0, 44.0, 14.0, 'Stable Shallow Crust', False, True,
        lons=[-1.0, 1.0, -1.0, 1.0, 0.0, 1.1, 0.9, 2.0],
        lats=[1.0, 1.0, -1.0, -1.0, 1.1, 2.0, 0.0, 0.9],
        depths=[21.0, 21.0, 59.0, 59.0, 20.0, 20.0, 80.0, 80.0])
    ses2 = SES(2, 40.0, [SESRupture(pr3, 1), SESRupture(pr4, 1),
                         SESRupture(pr5, 1)])

    sm_lt_path = 'b8_b9_b10'
    writer = writers.SESXMLWriter(path, sm_lt_path)
    writer.serialize([ses1, ses2])
    check_equal(__file__, 'expected_ses_collection.xml', path)
def test(self):
    """Run case_11's hazard job; check the three realization curves,
    the mean curve and the 0.1/0.9 quantile curves, then compare all
    six XML exports with the expected files."""
    current = case_11.__file__
    result_dir = tempfile.mkdtemp()
    aaae = numpy.testing.assert_array_almost_equal
    cfg = os.path.join(os.path.dirname(current), 'job.ini')
    expected_curve_poes_b1_b2 = [0.0055, 0.00042, 5.77E-5, 0.0]
    expected_curve_poes_b1_b3 = [0.00995, 0.00076, 9.7E-5, 0.0]
    expected_curve_poes_b1_b4 = [0.018, 0.0013, 0.00014, 0.0]
    expected_mean_poes = [0.01067, 0.0008, 9.774E-5, 0.0]
    expected_q0_1_poes = [0.0055, 0.00042, 5.77E-5, 0.0]
    expected_q0_9_poes = [0.013975, 0.00103, 0.0001185, 0.0]

    job = self.run_hazard(cfg)

    # Check the PoE values for the realization curves:
    curve_b1_b2, curve_b1_b3, curve_b1_b4 = (
        models.HazardCurveData.objects
        .filter(hazard_curve__output__oq_job=job.id,
                hazard_curve__lt_realization__isnull=False)
        .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path'))

    # Sanity check that the curves came back in the expected order:
    self.assertEqual(
        ['b1', 'b2'], curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
    self.assertEqual(
        ['b1', 'b3'], curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)
    self.assertEqual(
        ['b1', 'b4'], curve_b1_b4.hazard_curve.lt_realization.sm_lt_path)

    aaae(expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=4)
    aaae(expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=4)
    aaae(expected_curve_poes_b1_b4, curve_b1_b4.poes, decimal=4)

    # Check the mean curve:
    [mean_curve] = models.HazardCurveData.objects\
        .filter(hazard_curve__output__oq_job=job.id,
                hazard_curve__statistics='mean')
    aaae(expected_mean_poes, mean_curve.poes, decimal=4)

    # Check the quantile curves (ordered by quantile value):
    quantile_0_1_curve, quantile_0_9_curve = \
        models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id,
            hazard_curve__statistics='quantile'
        ).order_by('hazard_curve__quantile')
    aaae(expected_q0_1_poes, quantile_0_1_curve.poes, decimal=4)
    aaae(expected_q0_9_poes, quantile_0_9_curve.poes, decimal=4)

    # Check the exports as well:
    exported_file_b1_b2 = hazard_export.export(
        curve_b1_b2.hazard_curve.output.id, result_dir)
    check_equal(current, 'expected_b1_b2.xml', exported_file_b1_b2)
    exported_file_b1_b3 = hazard_export.export(
        curve_b1_b3.hazard_curve.output.id, result_dir)
    check_equal(current, 'expected_b1_b3.xml', exported_file_b1_b3)
    exported_file_b1_b4 = hazard_export.export(
        curve_b1_b4.hazard_curve.output.id, result_dir)
    check_equal(current, 'expected_b1_b4.xml', exported_file_b1_b4)
    exported_file_mean = hazard_export.export(
        mean_curve.hazard_curve.output.id, result_dir)
    check_equal(current, 'expected_mean.xml', exported_file_mean)
    q01_file = hazard_export.export(
        quantile_0_1_curve.hazard_curve.output.id, result_dir)
    check_equal(current, 'expected_quantile_0_1.xml', q01_file)
    q09_file = hazard_export.export(
        quantile_0_9_curve.hazard_curve.output.id, result_dir)
    check_equal(current, 'expected_quantile_0_9.xml', q09_file)

    shutil.rmtree(result_dir)