def test_sample_based(self):
    """Sample-based losses: means within 5% of the reference values,
    standard deviations above fixed floors."""
    vuln_model = {
        'RM': vf([0.05, 0.1, 0.2, 0.4, 0.8],
                 [0.05, 0.06, 0.07, 0.08, 0.09]),
        'RC': vf([0.035, 0.07, 0.14, 0.28, 0.56],
                 [0.1, 0.2, 0.3, 0.4, 0.5]),
    }

    # assets a1 and a3 share the RM vulnerability function
    ground_motion = [gmv.a1, gmv.a3]
    eps = scientific.make_epsilons(ground_motion, seed=37, correlation=0)
    out_a1, out_a3 = vuln_model['RM'].apply_to(ground_motion, eps)

    self.assertAlmostEqual(521.885458891, out_a1.mean() * 3000,
                           delta=0.05 * 521.885458891)
    self.assertTrue(out_a1.std(ddof=1) * 3000 > 244.825980356)
    self.assertAlmostEqual(200.54874638, out_a3.mean() * 1000,
                           delta=0.05 * 200.54874638)
    self.assertTrue(out_a3.std(ddof=1) * 1000 > 94.2302991022)

    # asset a2 uses the RC vulnerability function
    ground_motion = [gmv.a2]
    eps = scientific.make_epsilons(ground_motion, seed=37, correlation=0)
    [out_a2] = vuln_model['RC'].apply_to(ground_motion, eps)
    self.assertAlmostEqual(510.821363253, out_a2.mean() * 2000,
                           delta=0.05 * 510.821363253)
    self.assertTrue(out_a2.std(ddof=1) * 2000 > 259.964152622)
def test_mean_based(self):
    """Mean-based loss ratios must reproduce the reference values exactly."""
    # assets a1 and a3 share the RM vulnerability function
    gm = [self.hazard_mean["a1"], self.hazard_mean["a3"]]
    eps = scientific.make_epsilons(gm, seed=37, correlation=0)
    out_a1, out_a3 = self.vulnerability_model_mean["RM"].apply_to(gm, eps)

    self.assertAlmostEqual(440.147078317589, out_a1.mean() * 3000)
    self.assertAlmostEqual(182.615976701858, out_a1.std(ddof=1) * 3000)
    self.assertAlmostEqual(180.717534009275, out_a3.mean() * 1000)
    self.assertAlmostEqual(92.2122644809969, out_a3.std(ddof=1) * 1000)

    # asset a2 uses the RC vulnerability function
    gm = [self.hazard_mean["a2"]]
    eps = scientific.make_epsilons(gm, seed=37, correlation=0)
    [out_a2] = self.vulnerability_model_mean["RC"].apply_to(gm, eps)
    self.assertAlmostEqual(432.225448142534, out_a2.mean() * 2000)
    self.assertAlmostEqual(186.864456949986, out_a2.std(ddof=1) * 2000)
def test_sample_based(self):
    """Sampled losses: means within 5% of reference, stddevs above floors."""
    vulnerability_model = dict(
        RM=vf([0.05, 0.1, 0.2, 0.4, 0.8],
              [0.05, 0.06, 0.07, 0.08, 0.09]),
        RC=vf([0.035, 0.07, 0.14, 0.28, 0.56],
              [0.1, 0.2, 0.3, 0.4, 0.5]))

    gmfield = [gmv.a1, gmv.a3]
    eps = scientific.make_epsilons(gmfield, seed=37, correlation=0)
    out_a1, out_a3 = vulnerability_model['RM'].apply_to(gmfield, eps)
    self.assertAlmostEqual(521.885458891, out_a1.mean() * 3000,
                           delta=0.05 * 521.885458891)
    self.assertTrue(out_a1.std(ddof=1) * 3000 > 244.825980356)
    self.assertAlmostEqual(200.54874638, out_a3.mean() * 1000,
                           delta=0.05 * 200.54874638)
    self.assertTrue(out_a3.std(ddof=1) * 1000 > 94.2302991022)

    gmfield = [gmv.a2]
    eps = scientific.make_epsilons(gmfield, seed=37, correlation=0)
    [out_a2] = vulnerability_model['RC'].apply_to(gmfield, eps)
    self.assertAlmostEqual(510.821363253, out_a2.mean() * 2000,
                           delta=0.05 * 510.821363253)
    self.assertTrue(out_a2.std(ddof=1) * 2000 > 259.964152622)
def test_mean_based(self):
    """Mean-based losses for RM (assets a1, a3) and RC (asset a2)."""
    hazard = [self.hazard_mean["a1"], self.hazard_mean["a3"]]
    eps = scientific.make_epsilons(hazard, seed=37, correlation=0)
    out_a1, out_a3 = \
        self.vulnerability_model_mean["RM"].apply_to(hazard, eps)

    self.assertAlmostEqual(440.147078317589, out_a1.mean() * 3000)
    self.assertAlmostEqual(182.615976701858, out_a1.std(ddof=1) * 3000)
    self.assertAlmostEqual(180.717534009275, out_a3.mean() * 1000)
    self.assertAlmostEqual(92.2122644809969, out_a3.std(ddof=1) * 1000)

    hazard = [self.hazard_mean["a2"]]
    eps = scientific.make_epsilons(hazard, seed=37, correlation=0)
    [out_a2] = self.vulnerability_model_mean["RC"].apply_to(hazard, eps)
    self.assertAlmostEqual(432.225448142534, out_a2.mean() * 2000)
    self.assertAlmostEqual(186.864456949986, out_a2.std(ddof=1) * 2000)
def setUp(self):
    """Build the vulnerability function under test and attach a fixed
    epsilon distribution (one asset, one sample)."""
    self.test_func = scientific.VulnerabilityFunction(
        self.IMT, self.IMLS_GOOD, self.LOSS_RATIOS_GOOD, self.COVS_GOOD)
    eps = scientific.make_epsilons(
        numpy.zeros((1, 1)), seed=3, correlation=0)
    self.test_func.set_distribution(eps)
def test_mean_based_with_partial_correlation(self):
    # This is a regression test. Data has not been checked
    vf = scientific.VulnerabilityFunction(
        'SOME-TAXONOMY', 'PGA',
        [0.001, 0.2, 0.3, 0.5, 0.7],
        [0.01, 0.1, 0.2, 0.4, 0.8],
        [0.01, 0.02, 0.02, 0.01, 0.03])
    gmvs = numpy.array([[10., 20., 30., 40., 50.],
                        [1., 2., 3., 4., 5.]])
    eps = scientific.make_epsilons(gmvs, seed=1, correlation=0.5)
    loss_matrix = vf.apply_to(gmvs, eps)

    # the loss curve of the first asset is pinned by its integral
    losses_poes = scientific.event_based(loss_matrix[0], 120, 30, 4)
    self.assertAlmostEqual(
        0.48983614471, scientific.average_loss(losses_poes))

    wf = workflows.ProbabilisticEventBased(
        'PGA', 'SOME-TAXONOMY',
        vulnerability_functions={self.loss_type: vf},
        investigation_time=50,
        risk_investigation_time=50,
        ses_per_logic_tree_path=200,
        number_of_logic_tree_samples=0,
        loss_curve_resolution=4,
        conditional_loss_poes=[0.1, 0.5, 0.9],
        insured_losses=False)
    out = wf(self.loss_type, assets, gmvs, eps, [1, 2, 3, 4, 5])
    self.assert_similar(
        out.event_loss_table,
        {1: 15.332714802464356,
         2: 16.21582466071975,
         3: 15.646630129345354,
         4: 15.285164778325353,
         5: 15.860930792931873,
         })
def test_mean_based_with_partial_correlation(self):
    # This is a regression test. Data has not been checked
    vf = scientific.VulnerabilityFunction(
        'SOME-TAXONOMY', 'PGA',
        [0.001, 0.2, 0.3, 0.5, 0.7],
        [0.01, 0.1, 0.2, 0.4, 0.8],
        [0.01, 0.02, 0.02, 0.01, 0.03])
    gmvs = numpy.array([[10., 20., 30., 40., 50.],
                        [1., 2., 3., 4., 5.]])
    eps = scientific.make_epsilons(gmvs, seed=1, correlation=0.5)
    loss_matrix = vf.apply_to(gmvs, eps)

    # the first asset's loss curve integral is pinned
    losses_poes = scientific.event_based(loss_matrix[0], .25, 4)
    self.assertAlmostEqual(
        0.48983614471, scientific.average_loss(losses_poes))

    wf = workflows.ProbabilisticEventBased(
        'PGA', 'SOME-TAXONOMY',
        vulnerability_functions={self.loss_type: vf},
        investigation_time=50,
        risk_investigation_time=50,
        ses_per_logic_tree_path=200,
        number_of_logic_tree_samples=0,
        loss_curve_resolution=4,
        conditional_loss_poes=[0.1, 0.5, 0.9],
        insured_losses=False)
    # the workflow only needs a riskmodel attribute, not a real one
    wf.riskmodel = mock.MagicMock()
    out = wf(self.loss_type, assets, gmvs, eps, [1, 2, 3, 4, 5])
    numpy.testing.assert_almost_equal(
        out.average_losses, [0.01987912, 0.01929152])
def test_mean_based_with_partial_correlation(self):
    # This is a regression test. Data has not been checked
    vf = scientific.VulnerabilityFunction(
        'PGA',
        [0.001, 0.2, 0.3, 0.5, 0.7],
        [0.01, 0.1, 0.2, 0.4, 0.8],
        [0.01, 0.02, 0.02, 0.01, 0.03])
    gmvs = numpy.array([[10., 20., 30., 40., 50.],
                        [1., 2., 3., 4., 5.]])
    eps = scientific.make_epsilons(gmvs, seed=1, correlation=0.5)
    loss_matrix = vf.apply_to(gmvs, eps)

    # event_based returns a (losses, poes) pair in this API version
    losses, poes = scientific.event_based(loss_matrix[0], 120, 30, 4)
    self.assertAlmostEqual(
        0.48983614471, scientific.average_loss(losses, poes))

    wf = workflows.ProbabilisticEventBased(
        vulnerability_functions={self.loss_type: vf},
        time_span=50, tses=10000,
        loss_curve_resolution=4,
        conditional_loss_poes=[0.1, 0.5, 0.9],
        insured_losses=False)
    out = wf(self.loss_type, assets, gmvs, eps, [1, 2, 3, 4, 5])
    self.assert_similar(
        out.event_loss_table,
        {1: 15.332714802464356,
         2: 16.21582466071975,
         3: 15.646630129345354,
         4: 15.285164778325353,
         5: 15.860930792931873,
         })
def make_epsilons(asset_count, num_samples, seed, correlation):
    """
    Build an epsilon matrix for the given number of assets and samples.

    :param int asset_count: the number of assets
    :param int num_samples: the number of samples (one per rupture)
    :param int seed: a random seed
    :param float correlation: the correlation coefficient
    :returns: epsilons matrix of shape (asset_count, num_samples)
    """
    # the docstring previously documented a nonexistent `num_ruptures`
    # parameter; the actual parameter is `num_samples`
    zeros = numpy.zeros((asset_count, num_samples))
    return scientific.make_epsilons(zeros, seed, correlation)
def setUp(self):
    """Build the vulnerability function under test and attach a fixed
    epsilon distribution (one asset, three samples)."""
    self.test_func = scientific.VulnerabilityFunction(
        self.ID, self.IMT, self.IMLS_GOOD, self.LOSS_RATIOS_GOOD,
        self.COVS_GOOD)
    eps = scientific.make_epsilons(
        numpy.zeros((1, 3)), seed=3, correlation=0)
    self.test_func.set_distribution(eps)
def test_sample_mixed(self):
    """Sampling must work when CoVs are a mix of zero and non-zero."""
    eps = scientific.make_epsilons(
        numpy.zeros((1, 1)), seed=17, correlation=0.37)
    self.dist = scientific.LogNormalDistribution(eps)
    means = numpy.array([0., 0., .1, .1])
    covs = numpy.array([0., .1, 0., .1])
    samples = self.dist.sample(means, covs, None, slice(None)).reshape(-1)
    # zero-CoV entries pass through unchanged; the last entry is sampled
    numpy.testing.assert_allclose([0., 0., 0.1, 0.10228396], samples)
def test_insured_loss_mean_based(self):
    """Insured mean-based losses with a CoV-free vulnerability model.

    Checks the insured average losses per asset and the event loss
    table produced by the probabilistic event-based workflow.
    """
    vf = scientific.VulnerabilityFunction(
        'VF', 'PGA',
        [0.001, 0.2, 0.3, 0.5, 0.7],
        [0.01, 0.1, 0.2, 0.4, 0.8],
        [0.0, 0.0, 0.0, 0.0, 0.0])
    epsilons = scientific.make_epsilons(gmf[0:2], seed=1, correlation=0)
    loss_ratios = vf.apply_to(gmf[0:2], epsilons)

    # float literals: with the previous int literals the ratios
    # deductibles[i] / values[i] would be truncated to 0 by integer
    # division on Python 2 (the sibling tests already use floats)
    values = [3000., 1000.]
    insured_limits = [1250., 40.]
    deductibles = [40., 13.]
    insured_average_losses = [
        scientific.average_loss(scientific.event_based(
            scientific.insured_losses(
                lrs,
                deductibles[i] / values[i],
                insured_limits[i] / values[i]),
            50, 50, 20))
        for i, lrs in enumerate(loss_ratios)]
    numpy.testing.assert_allclose(
        [207.86489132 / 3000, 38.07815797 / 1000],
        insured_average_losses)

    wf = workflows.ProbabilisticEventBased(
        'PGA', 'SOME-TAXONOMY',
        vulnerability_functions={self.loss_type: vf},
        risk_investigation_time=50,
        hazard_investigation_time=50,
        ses_per_logic_tree_path=200,
        number_of_logic_tree_samples=0,
        loss_curve_resolution=4,
        conditional_loss_poes=[0.1, 0.5, 0.9],
        insured_losses=True)
    out = wf(self.loss_type, assets, gmf[0:2], epsilons, [1, 2, 3, 4, 5])
    self.assert_similar(
        out.event_loss_table,
        {1: 0.20314761658291458, 2: 0, 3: 0, 4: 0, 5: 0,
         })
def add_epsilons(assets_by_site, num_samples, seed, correlation):
    """
    Add an attribute named .epsilons to each asset in the assets_by_site
    container.

    :param assets_by_site: a list of lists of assets
    :param int num_samples: the number of epsilons per asset
    :param int seed: a random seed
    :param float correlation: the correlation coefficient
    """
    # merge the per-site groupby results into one taxonomy -> assets
    # mapping; assumes general.groupby returns an addable mapping
    # (AccumDict-like) — TODO confirm against openquake.baselib.general
    assets_by_taxonomy = sum(
        (general.groupby(assets, key=lambda a: a.taxonomy)
         for assets in assets_by_site), {})
    # .items() instead of the Python-2-only .iteritems(), for
    # consistency with the other epsilon builders in this codebase
    for taxonomy, assets in assets_by_taxonomy.items():
        logging.info('Building (%d, %d) epsilons for taxonomy %s',
                     len(assets), num_samples, taxonomy)
        eps_matrix = scientific.make_epsilons(
            numpy.zeros((len(assets), num_samples)), seed, correlation)
        # rows of the epsilon matrix are associated to assets in order
        for asset, epsilons in zip(assets, eps_matrix):
            asset.epsilons = epsilons
def test_mean_based(self):
    """Conditional loss ratios and average losses for RM and RC curves."""
    eps = scientific.make_epsilons([gmf[0]], seed=1, correlation=0)
    vf_rm = scientific.VulnerabilityFunction(
        'RM', 'PGA',
        [0.001, 0.2, 0.3, 0.5, 0.7],
        [0.01, 0.1, 0.2, 0.4, 0.8],
        [0.0, 0.0, 0.0, 0.0, 0.0])
    vf_rc = scientific.VulnerabilityFunction(
        'RC', 'PGA',
        [0.001, 0.2, 0.3, 0.5, 0.7],
        [0.0035, 0.07, 0.14, 0.28, 0.56],
        [0.0, 0.0, 0.0, 0.0, 0.0])
    cr = 50  # curve resolution
    curve_rm_1 = scientific.event_based(
        vf_rm.apply_to([gmf[0]], eps)[0], 50, 50, cr)
    curve_rm_2 = scientific.event_based(
        vf_rm.apply_to([gmf[1]], eps)[0], 50, 50, cr)
    curve_rc = scientific.event_based(
        vf_rc.apply_to([gmf[2]], eps)[0], 50, 50, cr)

    expected_cond = [0.0490311, 0.0428061]
    expected_avg = [0.070219108, 0.04549904]
    for i, curve_rm in enumerate([curve_rm_1, curve_rm_2]):
        cond = scientific.conditional_loss_ratio(
            curve_rm[0], curve_rm[1], 0.8)
        self.assertAlmostEqual(expected_cond[i], cond)
        self.assertAlmostEqual(
            expected_avg[i], scientific.average_loss(curve_rm))

    cond = scientific.conditional_loss_ratio(curve_rc[0], curve_rc[1], 0.8)
    self.assertAlmostEqual(0.0152273, cond)
    self.assertAlmostEqual(0.0152393, scientific.average_loss(curve_rc))
def test_mean_based(self):
    """Conditional loss ratios and average losses for RM and RC curves
    (newer event_based signature: time ratio + curve resolution)."""
    eps = scientific.make_epsilons([gmf[0]], seed=1, correlation=0)
    vf_rm = scientific.VulnerabilityFunction(
        'RM', 'PGA',
        [0.001, 0.2, 0.3, 0.5, 0.7],
        [0.01, 0.1, 0.2, 0.4, 0.8],
        [0.0, 0.0, 0.0, 0.0, 0.0])
    vf_rc = scientific.VulnerabilityFunction(
        'RC', 'PGA',
        [0.001, 0.2, 0.3, 0.5, 0.7],
        [0.0035, 0.07, 0.14, 0.28, 0.56],
        [0.0, 0.0, 0.0, 0.0, 0.0])
    cr = 50  # curve resolution
    curve_rm_1 = scientific.event_based(
        vf_rm.apply_to([gmf[0]], eps)[0], 1, cr)
    curve_rm_2 = scientific.event_based(
        vf_rm.apply_to([gmf[1]], eps)[0], 1, cr)
    curve_rc = scientific.event_based(
        vf_rc.apply_to([gmf[2]], eps)[0], 1, cr)

    expected_cond = [0.0490311, 0.0428061]
    expected_avg = [0.070219108, 0.04549904]
    for i, curve_rm in enumerate([curve_rm_1, curve_rm_2]):
        cond = scientific.conditional_loss_ratio(
            curve_rm[0], curve_rm[1], 0.8)
        self.assertAlmostEqual(expected_cond[i], cond)
        self.assertAlmostEqual(
            expected_avg[i], scientific.average_loss(curve_rm))

    cond = scientific.conditional_loss_ratio(curve_rc[0], curve_rc[1], 0.8)
    self.assertAlmostEqual(0.0152273, cond)
    self.assertAlmostEqual(0.0152393, scientific.average_loss(curve_rc))
def make_eps(asset_array, num_samples, seed, correlation):
    """
    :param asset_array: an array of assets
    :param int num_samples: the number of ruptures
    :param int seed: a random seed
    :param float correlation: the correlation coefficient
    :returns: epsilons matrix of shape (num_assets, num_samples)
    """
    eps = numpy.zeros((len(asset_array), num_samples), numpy.float32)
    for taxonomy, assets in group_array(asset_array, 'taxonomy').items():
        shape = (len(assets), num_samples)
        logging.info('Building %s epsilons for taxonomy %s', shape, taxonomy)
        rows = scientific.make_epsilons(
            numpy.zeros(shape), seed, correlation)
        # each asset gets the epsilon row matching its position
        for asset, row in zip(assets, rows):
            eps[asset['ordinal']] = row
    return eps
def test_init(self):
    """Epsilons drawn with correlation 0.37 must show ~0.37 pairwise
    correlation and unit self-correlation across assets."""
    n_assets = 100
    n_samples = 1000
    corr = 0.37
    eps = scientific.make_epsilons(
        numpy.zeros((n_assets, n_samples)), seed=17, correlation=corr)
    self.dist = scientific.LogNormalDistribution(eps)
    tol = 0.1
    for a1, a2 in utils.pairwise(range(n_assets)):
        coeffs = numpy.corrcoef(
            self.dist.epsilons[a1, :], self.dist.epsilons[a2, :])
        # diagonal entries are always exactly 1
        numpy.testing.assert_allclose(
            [1, 1], [coeffs[0, 0], coeffs[1, 1]])
        numpy.testing.assert_allclose(corr, coeffs[0, 1], rtol=0, atol=tol)
        numpy.testing.assert_allclose(corr, coeffs[1, 0], rtol=0, atol=tol)
def make_eps_dict(assets_by_site, num_samples, seed, correlation):
    """
    :param assets_by_site: a list of lists of assets
    :param int num_samples: the number of ruptures
    :param int seed: a random seed
    :param float correlation: the correlation coefficient
    :returns: dictionary asset_id -> epsilons
    """
    eps_dict = {}  # asset_id -> epsilons
    flat_assets = (a for assets in assets_by_site for a in assets)
    by_taxonomy = groupby(flat_assets, operator.attrgetter('taxonomy'))
    for taxonomy, assets in by_taxonomy.items():
        shape = (len(assets), num_samples)
        logging.info('Building %s epsilons for taxonomy %s', shape, taxonomy)
        rows = scientific.make_epsilons(
            numpy.zeros(shape), seed, correlation)
        # rows are associated to assets in iteration order
        for asset, eps in zip(assets, rows):
            eps_dict[asset.id] = eps
    return eps_dict
def test_init(self):
    """Epsilons drawn with correlation 0.37 must show ~0.37 pairwise
    correlation and unit self-correlation across assets."""
    n_assets = 100
    n_samples = 1000
    corr = 0.37
    eps = scientific.make_epsilons(
        numpy.zeros((n_assets, n_samples)), seed=17, correlation=corr)
    self.dist = scientific.LogNormalDistribution(eps)
    tol = 0.1
    for a1, a2 in scientific.pairwise(range(n_assets)):
        coeffs = numpy.corrcoef(
            self.dist.epsilons[a1, :], self.dist.epsilons[a2, :])
        # diagonal entries are always exactly 1
        numpy.testing.assert_allclose(
            [1, 1], [coeffs[0, 0], coeffs[1, 1]])
        numpy.testing.assert_allclose(corr, coeffs[0, 1], rtol=0, atol=tol)
        numpy.testing.assert_allclose(corr, coeffs[1, 0], rtol=0, atol=tol)
def test_insured_loss_mean_based(self):
    """Insured mean-based losses: per-asset insured averages and the
    workflow's event loss table."""
    vf = scientific.VulnerabilityFunction(
        'VF', 'PGA',
        [0.001, 0.2, 0.3, 0.5, 0.7],
        [0.01, 0.1, 0.2, 0.4, 0.8],
        [0.0, 0.0, 0.0, 0.0, 0.0])
    eps = scientific.make_epsilons(gmf[0:2], seed=1, correlation=0)
    loss_ratios = vf.apply_to(gmf[0:2], eps)
    values = [3000., 1000.]
    insured_limits = [1250., 40.]
    deductibles = [40., 13.]
    insured_avg = []
    for i, lrs in enumerate(loss_ratios):
        # clip losses between deductible and limit (as ratios of value)
        clipped = scientific.insured_losses(
            lrs, deductibles[i] / values[i], insured_limits[i] / values[i])
        insured_avg.append(
            scientific.average_loss(
                scientific.event_based(clipped, 50, 50, 20)))
    numpy.testing.assert_allclose([0.05667045, 0.02542965], insured_avg)

    wf = workflows.ProbabilisticEventBased(
        'PGA', 'SOME-TAXONOMY',
        vulnerability_functions={self.loss_type: vf},
        investigation_time=50,
        risk_investigation_time=50,
        ses_per_logic_tree_path=200,
        number_of_logic_tree_samples=0,
        loss_curve_resolution=4,
        conditional_loss_poes=[0.1, 0.5, 0.9],
        insured_losses=True)
    out = wf(self.loss_type, assets, gmf[0:2], eps, [1, 2, 3, 4, 5])
    self.assert_similar(
        out.event_loss_table,
        {1: 0.20314761658291458, 2: 0, 3: 0, 4: 0, 5: 0,
         })
def make_eps(assetcol, num_samples, seed, correlation):
    """
    :param assetcol: an AssetCollection instance
    :param int num_samples: the number of ruptures
    :param int seed: a random seed
    :param float correlation: the correlation coefficient
    :returns: epsilons matrix of shape (num_assets, num_samples)
    """
    eps = numpy.zeros((len(assetcol), num_samples), numpy.float32)
    for taxonomy, assets in groupby(assetcol, by_taxonomy).items():
        # the association with the epsilons is done in ordinal order
        assets.sort(key=operator.attrgetter('ordinal'))
        shape = (len(assets), num_samples)
        logging.info('Building %s epsilons for taxonomy %s', shape, taxonomy)
        rows = scientific.make_epsilons(
            numpy.zeros(shape), seed, correlation)
        for asset, row in zip(assets, rows):
            eps[asset.ordinal] = row
    return eps
def test_insured_loss_mean_based(self):
    """Insured mean-based losses: per-asset insured averages plus the
    workflow's average (and average insured) losses."""
    vf = scientific.VulnerabilityFunction(
        'VF', 'PGA',
        [0.001, 0.2, 0.3, 0.5, 0.7],
        [0.01, 0.1, 0.2, 0.4, 0.8],
        [0.0, 0.0, 0.0, 0.0, 0.0])
    eps = scientific.make_epsilons(gmf[0:2], seed=1, correlation=0)
    loss_ratios = vf.apply_to(gmf[0:2], eps)
    values = [3000., 1000.]
    insured_limits = [1250., 40.]
    deductibles = [40., 13.]
    insured_avg = []
    for i, lrs in enumerate(loss_ratios):
        # clip losses between deductible and limit (as ratios of value)
        clipped = scientific.insured_losses(
            lrs, deductibles[i] / values[i], insured_limits[i] / values[i])
        insured_avg.append(
            scientific.average_loss(
                scientific.event_based(clipped, 1, 20)))
    numpy.testing.assert_allclose([0.05667045, 0.02542965], insured_avg)

    wf = workflows.ProbabilisticEventBased(
        'PGA', 'SOME-TAXONOMY',
        vulnerability_functions={self.loss_type: vf},
        investigation_time=50,
        risk_investigation_time=50,
        ses_per_logic_tree_path=200,
        number_of_logic_tree_samples=0,
        loss_curve_resolution=4,
        conditional_loss_poes=[0.1, 0.5, 0.9],
        insured_losses=True)
    # the workflow only needs a riskmodel attribute, not a real one
    wf.riskmodel = mock.MagicMock()
    out = wf(self.loss_type, assets, gmf[0:2], eps, [1, 2, 3, 4, 5])
    numpy.testing.assert_almost_equal(
        out.average_losses, [0.00473820568, 0.0047437959417])
    numpy.testing.assert_almost_equal(
        out.average_insured_losses, [0, 0])
def test_mean_based_with_perfect_correlation(self):
    # This is a regression test. Data has not been checked
    vf = scientific.VulnerabilityFunction(
        'SOME-TAXONOMY', 'PGA',
        [0.001, 0.2, 0.3, 0.5, 0.7],
        [0.01, 0.1, 0.2, 0.4, 0.8],
        [0.01, 0.02, 0.02, 0.01, 0.03])
    gmvs = [[10., 20., 30., 40., 50.], [1., 2., 3., 4., 5.]]
    eps = scientific.make_epsilons(gmvs, seed=1, correlation=1)
    loss_matrix = vf.apply_to(gmvs, eps)

    losses_poes = scientific.event_based(loss_matrix[0], 120, 30, 4)
    self.assertAlmostEqual(
        0.483041416, scientific.average_loss(losses_poes))

    wf = workflows.ProbabilisticEventBased(
        'PGA', 'SOME-TAXONOMY',
        vulnerability_functions={self.loss_type: vf},
        risk_investigation_time=50,
        hazard_investigation_time=50,
        ses_per_logic_tree_path=200,
        number_of_logic_tree_samples=0,
        loss_curve_resolution=4,
        conditional_loss_poes=[0.1, 0.5, 0.9],
        insured_losses=False)
    out = wf(self.loss_type, assets, gmvs, eps, [1, 2, 3, 4, 5])
    self.assert_similar(
        out.event_loss_table,
        {1: 15.232320555463319,
         2: 16.248173683693864,
         3: 15.583030510462981,
         4: 15.177382760499968,
         5: 15.840499250058254,
         })
def test_mean_based_with_no_correlation(self):
    # This is a regression test. Data has not been checked
    vf = scientific.VulnerabilityFunction(
        'SOME-TAXONOMY', 'PGA',
        [0.001, 0.2, 0.3, 0.5, 0.7],
        [0.01, 0.1, 0.2, 0.4, 0.8],
        [0.01, 0.02, 0.02, 0.01, 0.03])
    gmvs = numpy.array([[10., 20., 30., 40., 50.],
                        [1., 2., 3., 4., 5.]])
    eps = scientific.make_epsilons(gmvs, seed=1, correlation=0)
    loss_matrix = vf.apply_to(gmvs, eps)

    losses_poes = scientific.event_based(
        loss_matrix[0], 120, 30, curve_resolution=4)
    self.assertAlmostEqual(
        0.500993631, scientific.average_loss(losses_poes))

    wf = workflows.ProbabilisticEventBased(
        'PGA', 'SOME-TAXONOMY',
        vulnerability_functions={self.loss_type: vf},
        risk_investigation_time=50,
        hazard_investigation_time=50,
        ses_per_logic_tree_path=200,
        number_of_logic_tree_samples=0,
        loss_curve_resolution=4,
        conditional_loss_poes=[0.1, 0.5, 0.9],
        insured_losses=False)
    out = wf(self.loss_type, assets, gmvs, eps, [1, 2, 3, 4, 5])
    self.assert_similar(
        out.event_loss_table,
        {1: 16.246646231503398,
         2: 15.613885199116158,
         3: 15.669704465134854,
         4: 16.241922530992454,
         5: 16.010104452203464,
         })
def make_eps(assets_by_site, num_samples, seed, correlation):
    """
    :param assets_by_site: a list of lists of assets
    :param int num_samples: the number of ruptures
    :param int seed: a random seed
    :param float correlation: the correlation coefficient
    :returns: epsilons matrix of shape (num_assets, num_samples)
    """
    flat_assets = (a for assets in assets_by_site for a in assets)
    by_taxo = groupby(flat_assets, by_taxonomy)
    num_assets = sum(len(assets) for assets in assets_by_site)
    eps = numpy.zeros((num_assets, num_samples), numpy.float32)
    for taxonomy, assets in by_taxo.items():
        # the association with the epsilons is done in order
        # NOTE(review): sorts by .idx but indexes the matrix by
        # .ordinal — assumes the two orderings agree; verify against
        # the Asset class
        assets.sort(key=operator.attrgetter('idx'))
        shape = (len(assets), num_samples)
        logging.info('Building %s epsilons for taxonomy %s', shape, taxonomy)
        rows = scientific.make_epsilons(
            numpy.zeros(shape), seed, correlation)
        for asset, row in zip(assets, rows):
            eps[asset.ordinal] = row
    return eps
def make_eps(assets_by_site, num_samples, seed, correlation):
    """
    :param assets_by_site: a list of lists of assets
    :param int num_samples: the number of ruptures
    :param int seed: a random seed
    :param float correlation: the correlation coefficient
    :returns: epsilons matrix of shape (num_assets, num_samples)
    """
    flat_assets = (a for assets in assets_by_site for a in assets)
    by_taxo = groupby(flat_assets, operator.attrgetter('taxonomy'))
    num_assets = sum(len(assets) for assets in assets_by_site)
    eps = numpy.zeros((num_assets, num_samples), numpy.float32)
    for taxonomy, assets in by_taxo.items():
        # the association with the epsilons is done in order
        # NOTE(review): sorts by .id but indexes the matrix by .idx —
        # assumes the two orderings agree; verify against the Asset class
        assets.sort(key=operator.attrgetter('id'))
        shape = (len(assets), num_samples)
        logging.info('Building %s epsilons for taxonomy %s', shape, taxonomy)
        rows = scientific.make_epsilons(
            numpy.zeros(shape), seed, correlation)
        for asset, row in zip(assets, rows):
            eps[asset.idx] = row
    return eps
def test_mean_based_with_no_correlation(self):
    # This is a regression test. Data has not been checked
    vf = scientific.VulnerabilityFunction(
        'SOME-TAXONOMY', 'PGA',
        [0.001, 0.2, 0.3, 0.5, 0.7],
        [0.01, 0.1, 0.2, 0.4, 0.8],
        [0.01, 0.02, 0.02, 0.01, 0.03])
    gmvs = numpy.array([[10., 20., 30., 40., 50.],
                        [1., 2., 3., 4., 5.]])
    eps = scientific.make_epsilons(gmvs, seed=1, correlation=0)
    loss_matrix = vf.apply_to(gmvs, eps)

    losses_poes = scientific.event_based(
        loss_matrix[0], 120, 30, curve_resolution=4)
    self.assertAlmostEqual(
        0.500993631, scientific.average_loss(losses_poes))

    wf = workflows.ProbabilisticEventBased(
        'PGA', 'SOME-TAXONOMY',
        vulnerability_functions={self.loss_type: vf},
        investigation_time=50,
        risk_investigation_time=50,
        ses_per_logic_tree_path=200,
        number_of_logic_tree_samples=0,
        loss_curve_resolution=4,
        conditional_loss_poes=[0.1, 0.5, 0.9],
        insured_losses=False)
    out = wf(self.loss_type, assets, gmvs, eps, [1, 2, 3, 4, 5])
    self.assert_similar(
        out.event_loss_table,
        {1: 16.246646231503398,
         2: 15.613885199116158,
         3: 15.669704465134854,
         4: 16.241922530992454,
         5: 16.010104452203464,
         })
def test_mean_based_with_perfect_correlation(self):
    # This is a regression test. Data has not been checked
    vf = scientific.VulnerabilityFunction(
        'SOME-TAXONOMY', 'PGA',
        [0.001, 0.2, 0.3, 0.5, 0.7],
        [0.01, 0.1, 0.2, 0.4, 0.8],
        [0.01, 0.02, 0.02, 0.01, 0.03])
    gmvs = [[10., 20., 30., 40., 50.], [1., 2., 3., 4., 5.]]
    eps = scientific.make_epsilons(gmvs, seed=1, correlation=1)
    loss_matrix = vf.apply_to(gmvs, eps)

    losses_poes = scientific.event_based(loss_matrix[0], 120, 30, 4)
    self.assertAlmostEqual(
        0.483041416, scientific.average_loss(losses_poes))

    wf = workflows.ProbabilisticEventBased(
        'PGA', 'SOME-TAXONOMY',
        vulnerability_functions={self.loss_type: vf},
        investigation_time=50,
        risk_investigation_time=50,
        ses_per_logic_tree_path=200,
        number_of_logic_tree_samples=0,
        loss_curve_resolution=4,
        conditional_loss_poes=[0.1, 0.5, 0.9],
        insured_losses=False)
    out = wf(self.loss_type, assets, gmvs, eps, [1, 2, 3, 4, 5])
    self.assert_similar(
        out.event_loss_table,
        {1: 15.232320555463319,
         2: 16.248173683693864,
         3: 15.583030510462981,
         4: 15.177382760499968,
         5: 15.840499250058254,
         })
numpy.array([0.1]))) def test_zero_ratios(self): # a loss ratio can be zero if the corresponding CoV is zero scientific.VulnerabilityFunction('v1', 'PGA', [.1, .2, .3], [0, .1, .2], [0, .2, .3], 'BT') def test_large_covs(self): with self.assertRaises(ValueError) as ctx: scientific.VulnerabilityFunction('v1', 'PGA', [.1, .2, .3], [.05, .1, .2], [.1, .2, 3], 'BT') self.assertIn('The coefficient of variation 3.0 > 2.0 is too large', str(ctx.exception)) epsilons = scientific.make_epsilons(numpy.zeros((1, 3)), seed=3, correlation=0)[0] class VulnerabilityFunctionTestCase(unittest.TestCase): """ Test for :py:class:`openquake.risklib.vulnerability_function.VulnerabilityFunction`. """ IMLS_GOOD = [0.005, 0.007, 0.0098, 0.0137, 0.0192, 0.0269] IMLS_BAD = [-0.1, 0.007, 0.0098, 0.0137, 0.0192, 0.0269] IMLS_DUPE = [0.005, 0.005, 0.0098, 0.0137, 0.0192, 0.0269] IMLS_BAD_ORDER = [0.005, 0.0098, 0.007, 0.0137, 0.0192, 0.0269] LOSS_RATIOS_GOOD = [0.01, 0.1, 0.3, 0.5, 0.6, 1.0] LOSS_RATIOS_BAD = [0.1, 0.3, 0.0, 1.1, -0.1, 0.6] LOSS_RATIOS_TOO_SHORT = [0.1, 0.3, 0.0, 0.5, 1.0]
numpy.testing.assert_allclose( [0.057241368], scientific.BetaDistribution().sample( numpy.array([0.1]), None, numpy.array([0.1]))) class TestMemoize(unittest.TestCase): def test_cache(self): m = mock.Mock(return_value=3) func = utils.memoized(m) self.assertEqual(3, func()) self.assertEqual(3, func()) self.assertEqual(1, m.call_count) epsilons = scientific.make_epsilons( numpy.zeros((1, 3)), seed=3, correlation=0)[0] class VulnerabilityFunctionTestCase(unittest.TestCase): """ Test for :py:class:`openquake.risklib.vulnerability_function.VulnerabilityFunction`. """ IMLS_GOOD = [0.005, 0.007, 0.0098, 0.0137, 0.0192, 0.0269] IMLS_BAD = [-0.1, 0.007, 0.0098, 0.0137, 0.0192, 0.0269] IMLS_DUPE = [0.005, 0.005, 0.0098, 0.0137, 0.0192, 0.0269] IMLS_BAD_ORDER = [0.005, 0.0098, 0.007, 0.0137, 0.0192, 0.0269] LOSS_RATIOS_GOOD = [0.01, 0.1, 0.3, 0.5, 0.6, 1.0] LOSS_RATIOS_BAD = [0.1, 0.3, 0.0, 1.1, -0.1, 0.6] LOSS_RATIOS_TOO_SHORT = [0.1, 0.3, 0.0, 0.5, 1.0]