def test_end_to_end(self):
    """End-to-end check of the classical PSHA loss ratio curve.

    Expected values were computed manually by Vitor Silva.
    """
    psha.STEPS_PER_INTERVAL = 2

    hazard_curve = shapes.Curve([
        (0.01, 0.99), (0.08, 0.96), (0.17, 0.89), (0.26, 0.82),
        (0.36, 0.70), (0.55, 0.40), (0.70, 0.01)])

    vuln_function = shapes.VulnerabilityFunction([
        (0.1, (0.05, 0.5)), (0.2, (0.08, 0.3)),
        (0.4, (0.2, 0.2)), (0.6, (0.4, 0.1))])

    computed_curve = psha.compute_loss_ratio_curve(
        vuln_function, hazard_curve)

    expected_curve = shapes.Curve([
        (0.0, 0.96), (0.025, 0.96), (0.05, 0.91), (0.065, 0.87),
        (0.08, 0.83), (0.14, 0.75), (0.2, 0.60), (0.3, 0.47),
        (0.4, 0.23), (0.7, 0.00), (1.0, 0.00)])

    # Compare the computed curve against the expected one, point by
    # point at the expected abscissae.
    for abscissa in expected_curve.abscissae:
        self.assertTrue(numpy.allclose(
            expected_curve.ordinate_for(abscissa),
            computed_curve.ordinate_for(abscissa), atol=0.005))
def test_loss_curve_computation(self):
    """A loss curve is the loss ratio curve scaled by the asset value."""
    ratio_curve = shapes.Curve([(0.1, 1.0), (0.2, 2.0), (0.3, 3.0)])

    computed = common.compute_loss_curve(ratio_curve, ASSET_VALUE)

    # Each abscissa (loss ratio) is multiplied by the asset value;
    # the ordinates (PoEs) are untouched.
    expected = shapes.Curve([
        (0.1 * ASSET_VALUE, 1.0),
        (0.2 * ASSET_VALUE, 2.0),
        (0.3 * ASSET_VALUE, 3.0)])

    self.assertEqual(expected, computed)
def setUpClass(cls):
    """Build the fixture curves shared by the tests in this case."""
    # quadratic curve: f(x) = x^2
    cls.x_vals = [1, 2, 3]
    cls.y_vals = [value ** 2 for value in cls.x_vals]
    cls.simple_curve = shapes.Curve(zip(cls.x_vals, cls.y_vals))

    # straight line: f(x) = x
    cls.straight_curve = shapes.Curve(zip(range(1, 4), range(1, 4)))
def test_reads_multiple_curves_in_one_branch(self):
    """Two curves stored under a single branch are both deserialized."""
    self.python_client.set("KEY", MULTIPLE_CURVES_ONE_BRANCH)

    curves = self.reader.as_curve("KEY")

    self.assertEqual(2, len(curves))
    self.assertEqual(
        shapes.Curve(((1.0, 5.1), (2.0, 5.2), (3.0, 5.3))), curves[0])
    self.assertEqual(
        shapes.Curve(((1.0, 6.1), (2.0, 6.2), (3.0, 6.3))), curves[1])
def test_reads_multiple_curves_in_multiple_branches(self):
    """Curves spread over several branches are all deserialized."""
    self.python_client.set("KEY", MULTIPLE_CURVES_MULTIPLE_BRANCHES)

    curves = self.reader.as_curve("KEY")

    self.assertEqual(2, len(curves))
    self.assertEqual(
        shapes.Curve(((1.0, 1.8), (2.0, 2.8), (3.0, 3.8))), curves[0])
    self.assertEqual(
        shapes.Curve(((1.0, 1.5), (2.0, 2.5), (3.0, 3.5))), curves[1])
def test_loss_ratio_po_computation(self):
    """PoE mid curve is turned into a PoO (occurrence) curve."""
    mid_pe_curve = shapes.Curve([
        (0.0300, 0.2330), (0.0900, 0.0885), (0.1500, 0.0485),
        (0.2100, 0.0295), (0.2700, 0.0140), (0.3750, 0.0045)])

    expected_po_curve = shapes.Curve([
        (0.0600, 0.1445), (0.1200, 0.0400), (0.1800, 0.0190),
        (0.2400, 0.0155), (0.3225, 0.0095)])

    self.assertEqual(expected_po_curve,
                     common._compute_mid_po(mid_pe_curve))
def test_loss_ratio_pe_mid_curve_computation(self):
    """Mid-mean PoE curve averages consecutive ratios and PoEs."""
    ratio_curve = shapes.Curve([
        (0, 0.3460), (0.06, 0.12), (0.12, 0.057), (0.18, 0.04),
        (0.24, 0.019), (0.3, 0.009), (0.45, 0)])

    expected_mid_curve = shapes.Curve([
        (0.0300, 0.2330), (0.0900, 0.0885), (0.1500, 0.0485),
        (0.2100, 0.0295), (0.2700, 0.0140), (0.3750, 0.0045)])

    self.assertEqual(expected_mid_curve,
                     common._compute_mid_mean_pe(ratio_curve))
def test_can_construct_a_curve_from_list(self):
    """Curve.from_list builds the same curves as the constructor.

    Covers both plain ordinates and tuple (mean, cov) ordinates.
    """
    curve1 = shapes.Curve([(0.1, 1.0), (0.2, 2.0)])
    curve2 = shapes.Curve.from_list([[0.1, 1.0], [0.2, 2.0]])

    curve3 = shapes.Curve([(0.1, (1.0, 0.3)), (0.2, (2.0, 0.3))])
    curve4 = shapes.Curve.from_list(
        [[0.1, [1.0, 0.3]], [0.2, [2.0, 0.3]]])

    # NOTE: a stale third case duplicating curve4 (with a comment
    # copy-pasted from the from_dict test) was removed: from_list has
    # no string-key variant, so it asserted nothing new.
    self.assertEqual(curve1, curve2)
    self.assertEqual(curve3, curve4)
def test_can_construct_a_curve_from_dict(self):
    """Curve.from_dict builds the same curves as the constructor.

    Keys may be strings (as produced by JSON serialization) or
    already-converted floats; both must yield equal curves.
    """
    curve1 = shapes.Curve([(0.1, 1.0), (0.2, 2.0)])
    curve2 = shapes.Curve.from_dict({"0.1": 1.0, "0.2": 2.0})

    curve3 = shapes.Curve([(0.1, (1.0, 0.3)), (0.2, (2.0, 0.3))])
    curve4 = shapes.Curve.from_dict(
        {"0.1": (1.0, 0.3), "0.2": (2.0, 0.3)})

    # keys are already floats
    curve5 = shapes.Curve.from_dict({0.1: (1.0, 0.3), 0.2: (2.0, 0.3)})

    # assertEquals is a deprecated alias of assertEqual
    self.assertEqual(curve1, curve2)
    self.assertEqual(curve3, curve4)
    self.assertEqual(curve3, curve5)
def deserialize(output_id):
    """
    Read the given loss curve from the database.

    The structure of the result is documented in
    :class:`LossCurveDBWriter`.
    """
    loss_curve = models.LossCurve.objects.get(output=output_id)

    # Metadata shared by every asset of this loss curve; each datum
    # below gets its own copy with its 'assetID' filled in.
    asset = {
        'assetValueUnit': loss_curve.unit,
        'endBranchLabel': loss_curve.end_branch_label,
        'lossCategory': loss_curve.category,
    }

    result = []
    for datum in loss_curve.losscurvedata_set.all():
        asset = asset.copy()
        asset['assetID'] = datum.asset_ref

        site = shapes.Site(datum.location.x, datum.location.y)
        curve = shapes.Curve(zip(datum.losses, datum.poes))
        result.append((site, (curve, asset)))

    return result
def test_reads_one_curve(self):
    """A model with a single hazard curve yields exactly one Curve."""
    self.python_client.set("KEY", ONE_CURVE_MODEL)

    curves = self.reader.as_curve("KEY")

    self.assertEqual(1, len(curves))
    self.assertEqual(
        shapes.Curve(((1.0, 0.1), (2.0, 0.2), (3.0, 0.3))), curves[0])
def test_lrem_po_computation(self):
    """Spot-check entries of the LREM * PoOs matrix."""
    hazard_curve = shapes.Curve([
        (0.01, 0.99), (0.08, 0.96), (0.17, 0.89), (0.26, 0.82),
        (0.36, 0.70), (0.55, 0.40), (0.70, 0.01)])

    # pre computed values just use one intermediate
    # values between the imls
    psha.STEPS_PER_INTERVAL = 2

    vuln_function = shapes.VulnerabilityFunction([
        (0.1, (0.05, 0.5)), (0.2, (0.08, 0.3)),
        (0.4, (0.2, 0.2)), (0.6, (0.4, 0.1))])

    lrem = psha._compute_lrem(vuln_function)
    lrem_po = psha._compute_lrem_po(vuln_function, lrem, hazard_curve)

    # (row, col) -> expected value, checked to 3 decimal places
    spot_checks = (
        ((0, 0), 0.07), ((1, 0), 0.06), ((0, 1), 0.13),
        ((5, 3), 0.47), ((8, 3), 0.23), ((10, 0), 0.00))

    for (row, col), expected in spot_checks:
        self.assertTrue(
            numpy.allclose(expected, lrem_po[row][col], atol=0.005))
def test_computes_the_aggregate_loss_curve(self):
    """Aggregating six assets' GMFs yields the expected loss curve."""
    vuln_functions = {"ID": self.vuln_function_2}

    # no epsilon_provided is needed because the vulnerability
    # function has all the covs equal to zero
    aggregate_curve = prob.AggregateLossCurve(vuln_functions, None)

    pairs = (
        (self.gmfs_1, self.asset_1), (self.gmfs_2, self.asset_2),
        (self.gmfs_3, self.asset_3), (self.gmfs_4, self.asset_4),
        (self.gmfs_5, self.asset_5), (self.gmfs_6, self.asset_6))

    for gmfs, asset in pairs:
        aggregate_curve.append(gmfs, asset)

    expected_losses = numpy.array((
        7.2636, 57.9264, 187.4893, 66.9082, 47.0280,
        248.7796, 23.2329, 121.3514, 177.4167, 259.2902,
        77.7080, 127.7417, 18.9470, 339.5774, 151.1763,
        6.1881, 71.9168, 97.9514, 56.4720, 11.6513))

    self.assertTrue(
        numpy.allclose(expected_losses, aggregate_curve.losses))

    expected_curve = shapes.Curve([
        (39.52702042, 0.99326205), (106.20489077, 0.917915),
        (172.88276113, 0.77686984), (239.56063147, 0.52763345),
        (306.23850182, 0.22119922)])

    self.assertEqual(expected_curve, aggregate_curve.compute(6))
def test_loss_ratio_curve_in_the_classical_psha_mixin(self):
    """The mixin computes a (non-None) loss ratio curve end to end."""
    # mixin "instance"
    mixin = ClassicalPSHABasedMixin()

    hazard_curve = shapes.Curve([
        (0.01, 0.99), (0.08, 0.96), (0.17, 0.89), (0.26, 0.82),
        (0.36, 0.70), (0.55, 0.40), (0.70, 0.01)])

    vuln_function = shapes.VulnerabilityFunction([
        (0.1, (0.05, 0.5)), (0.2, (0.08, 0.3)),
        (0.4, (0.2, 0.2)), (0.6, (0.4, 0.1))])

    # pre computed values just use one intermediate
    # values between the imls
    psha.STEPS_PER_INTERVAL = 2

    mixin.job_id = 1234
    mixin.vuln_curves = {"ID": vuln_function}

    asset = {"vulnerabilityFunctionReference": "ID", "assetID": 1}
    result = mixin.compute_loss_ratio_curve(
        shapes.GridPoint(None, 10, 20), asset, hazard_curve)

    self.assertTrue(result is not None)
def test_mean_loss_ratio_computation(self):
    """The mean loss of a known loss ratio curve matches the fixture."""
    ratio_curve = shapes.Curve([
        (0, 0.3460), (0.06, 0.12), (0.12, 0.057), (0.18, 0.04),
        (0.24, 0.019), (0.3, 0.009), (0.45, 0)])

    # TODO (ac): Check the difference between 0.023305 and 0.023673
    self.assertAlmostEqual(
        0.023305, common.compute_mean_loss(ratio_curve), 3)
def test_creating_the_aggregate_curve_from_kvs_gets_all_the_sites(self):
    """Building the aggregate curve from KVS picks up every site."""
    expected_curve = shapes.Curve([
        (39.52702042, 0.99326205), (106.20489077, 0.917915),
        (172.88276113, 0.77686984), (239.56063147, 0.52763345),
        (306.23850182, 0.22119922)])

    aggregate_curve = prob.AggregateLossCurve.from_kvs(self.job_id, None)

    # result is correct, so we are getting the correct assets
    self.assertEqual(expected_curve, aggregate_curve.compute(6))
def _generate_curve(losses, probs_of_exceedance):
    """Generate a loss ratio (or loss) curve, given a set of losses
    and corresponding PoEs (Probabilities of Exceedance).

    This function is intended to be used internally.
    """
    # Pair each PoE with the midpoint of consecutive losses.
    midpoints = collect(loop(losses, lambda x, y: mean([x, y])))
    return shapes.Curve(zip(midpoints, probs_of_exceedance))
def test_abscissa_for_in_not_ascending_order_with_dups(self):
    """
    This tests the corner case when:
    "vals must be arranged in ascending order with no duplicates"
    """
    duplicated = [1, 1, 1]
    curve = shapes.Curve(zip(duplicated, duplicated))

    # Duplicate abscissae must be rejected with an AssertionError.
    self.assertRaises(AssertionError, curve.abscissa_for, duplicated)
def _compute_mid_mean_pe(loss_ratio_curve):
    """Compute a new loss ratio curve taking the mean values."""
    average = lambda x, y: mean([x, y])

    # Midpoints of consecutive loss ratios and of consecutive PoEs.
    mid_ratios = collect(loop(loss_ratio_curve.abscissae, average))
    mid_pes = collect(loop(loss_ratio_curve.ordinates, average))

    return shapes.Curve(zip(mid_ratios, mid_pes))
def test_end_to_end_curves_reading(self):
    """Curves written by the Java client can be read back as Curves."""
    # Hazard object model serialization in JSON is tested in the Java side
    self.java_client.set("KEY", ONE_CURVE_MODEL)

    # give the Java side a moment to finish writing
    time.sleep(0.3)

    curves = self.reader.as_curve("KEY")

    self.assertEqual(1, len(curves))
    self.assertEqual(
        shapes.Curve(((1.0, 0.1), (2.0, 0.2), (3.0, 0.3))), curves[0])
def _compute_mid_po(loss_ratio_pe_mid_curve):
    """Compute a loss ratio curve that has PoOs (Probabilities
    of Occurrence) as Y values."""
    # Midpoints of consecutive loss ratios pair with the differences
    # of consecutive PoEs (exceedance -> occurrence).
    mid_ratios = collect(
        loop(loss_ratio_pe_mid_curve.abscissae,
             lambda x, y: mean([x, y])))
    pos = collect(
        loop(loss_ratio_pe_mid_curve.ordinates, lambda x, y: x - y))

    return shapes.Curve(zip(mid_ratios, pos))
def test_pes_to_pos(self):
    """PoEs sampled on a hazard curve convert to the expected PoOs."""
    hazard_curve = shapes.Curve([
        (0.01, 0.99), (0.08, 0.96), (0.17, 0.89), (0.26, 0.82),
        (0.36, 0.70), (0.55, 0.40), (0.70, 0.01)])

    sample_points = [0.05, 0.15, 0.3, 0.5, 0.7]
    expected_pos = [0.0673, 0.1336, 0.2931, 0.4689]

    computed_pos = psha._convert_pes_to_pos(hazard_curve, sample_points)
    self.assertTrue(
        numpy.allclose(expected_pos, computed_pos, atol=0.00005))
def test_pes_from_imls(self):
    """IMLs interpolated on a hazard curve give the expected PoEs."""
    hazard_curve = shapes.Curve([
        (0.01, 0.99), (0.08, 0.96), (0.17, 0.89), (0.26, 0.82),
        (0.36, 0.70), (0.55, 0.40), (0.70, 0.01)])

    imls = [0.05, 0.15, 0.3, 0.5, 0.7]
    expected_pes = numpy.array([0.9729, 0.9056, 0.7720, 0.4789, 0.0100])

    computed_pes = psha._compute_pes_from_imls(hazard_curve, imls)
    self.assertTrue(
        numpy.allclose(expected_pes, computed_pes, atol=0.00005))
def compute_loss_ratio_curve(vuln_function, hazard_curve):
    """Compute a loss ratio curve for a specific hazard curve (e.g., site),
    by applying a given vulnerability function.

    A loss ratio curve is a function that has loss ratios as X values
    and PoEs (Probabilities of Exceendance) as Y values.
    """
    # LREM: Loss Ratio Exceedance Matrix of the vulnerability function,
    # then weighted by the occurrence probabilities of the hazard curve.
    lrem = _compute_lrem(vuln_function)
    lrem_po = _compute_lrem_po(vuln_function, lrem, hazard_curve)

    # Each loss ratio pairs with the row sum of the weighted matrix.
    pes = lrem_po.sum(axis=1)
    return shapes.Curve(zip(_generate_loss_ratios(vuln_function), pes))
def as_curve(self, key):
    """Read serialized versions of hazard curves
    and produce shapes.Curve objects."""
    decoded_model = self._get_and_decode(key)

    # Flatten every branch ("hcRepList") into a single list of curves,
    # pairing each PoE list with the branch's ground motion levels.
    return [
        shapes.Curve(zip(branch["gmLevels"], poes))
        for branch in decoded_model["hcRepList"]
        for poes in branch["probExList"]]
def test_that_conditional_loss_is_in_kvs(self):
    """Computing a conditional loss stores it under the KVS loss key."""
    asset = GRID_ASSETS[(0, 1)]
    job_id = "1"
    col, row = 1, 0
    loss_poe = 0.1

    loss_curve = shapes.Curve([
        (0.21, 0.131), (0.24, 0.108), (0.27, 0.089), (0.30, 0.066)])

    # should set in kvs the conditional loss
    general.compute_conditional_loss(
        job_id, col, row, loss_curve, asset, loss_poe)

    loss_key = kvs.tokens.loss_key(
        job_id, row, col, asset.asset_ref, loss_poe)
    self.assertTrue(kvs.get_client().get(loss_key))
def test_loading_and_storing_model_in_kvs(self):
    """A vulnerability model round-trips through the KVS intact."""
    path = os.path.join(test.SCHEMA_DIR, TEST_FILE)
    vulnerability.load_vulnerability_model(1234, path)

    model = vulnerability.load_vuln_model_from_kvs(1234)
    self.assertEqual(NO_OF_CURVES_IN_TEST_FILE, len(model))

    expected_curve = shapes.Curve([
        (5.0, (0.00, 0.3)), (5.5, (0.00, 0.3)), (6.0, (0.00, 0.3)),
        (6.5, (0.00, 0.3)), (7.0, (0.00, 0.3)), (7.5, (0.01, 0.3)),
        (8.0, (0.06, 0.3)), (8.5, (0.18, 0.3)), (9.0, (0.36, 0.3)),
        (9.5, (0.36, 0.3)), (10.0, (0.36, 0.3))])

    # Both taxonomies share the same fixture curve.
    for taxonomy in ("PK", "IR"):
        self.assertEqual(expected_curve, model[taxonomy])
def test_with_no_ground_motion_the_curve_is_a_single_point(self):
    """Zero ground motion collapses the loss ratio curve to one point."""
    gmfs = {"IMLs": (0.0, 0.0, 0.0, 0.0, 0.0,
                     0.0, 0.0, 0.0, 0.0, 0.0),
            "TSES": 900, "TimeSpan": 50}

    # sounds like a curve, but it's a point :-)
    expected_curve = shapes.Curve([
        (0.0, 0.0), (0.0, 0.0), (0.0, 0.0), (0.0, 0.0),
        (0.0, 0.0), (0.0, 0.0), (0.0, 0.0), (0.0, 0.0),
        (0.0, 0.0), (0.0, 0.0), (0.0, 0.0), (0.0, 0.0),
        (0.0, 0.0), (0.0, 0.0), (0.0, 0.0), (0.0, 0.0),
        (0.0, 0.0), (0.0, 0.0), (0.0, 0.0), (0.0, 0.0),
        (0.0, 0.0), (0.0, 0.0), (0.0, 0.0), (0.0, 0.0)])

    self.assertEqual(expected_curve, prob.compute_loss_ratio_curve(
        self.vuln_function_1, gmfs, None, None))
def test_curve_to_plot_interface_translation(self):
    """A Curve translates to the dict shape the plotter expects."""
    curve = shapes.Curve([(0.1, 1.0), (0.2, 2.0)])

    expected_data = {
        "AggregateLossCurve": {
            "abscissa": (0.1, 0.2),
            "ordinate": (1.0, 2.0),
            "abscissa_property": "Economic Losses",
            "ordinate_property": "PoE in 50.0 years",
            "curve_title": "Aggregate Loss Curve",
        },
    }

    self.assertEqual(expected_data, aggregate._for_plotting(curve, 50.0))
def test_equals_when_have_the_same_values(self):
    """Curve equality compares values, including tuple ordinates.

    Curves with identical points are equal; a differing ordinate or a
    differing cov inside a (mean, cov) tuple breaks equality.
    """
    curve1 = shapes.Curve([(0.1, 1.0), (0.2, 2.0)])
    curve2 = shapes.Curve([(0.1, 1.0), (0.2, 2.0)])
    curve3 = shapes.Curve([(0.1, 1.0), (0.2, 5.0)])
    curve4 = shapes.Curve([(0.1, (1.0, 0.3)), (0.2, (2.0, 0.3))])
    curve5 = shapes.Curve([(0.1, (1.0, 0.3)), (0.2, (2.0, 0.3))])
    curve6 = shapes.Curve([(0.1, (1.0, 0.5)), (0.2, (2.0, 0.3))])

    # assertEquals/assertNotEquals are deprecated aliases of
    # assertEqual/assertNotEqual.
    self.assertEqual(curve1, curve2)
    self.assertNotEqual(curve1, curve3)
    self.assertNotEqual(curve1, curve4)
    self.assertNotEqual(curve3, curve4)
    self.assertEqual(curve4, curve5)
    self.assertNotEqual(curve5, curve6)