Example #1
 def setUp(self):
     self.v1 = scientific.VulnerabilityFunction(
         'V1', self.IMT, [0, 1], [0.5, 0.7], [0, 0], "LN")
     self.v1.seed = 41
     self.v2 = scientific.VulnerabilityFunction(
         'V2', self.IMT, [0, 1, 2], [0.25, 0.5, 0.75], [0, 0, 0], "LN")
     self.v2.seed = 41
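The constructor pattern used throughout these examples is VulnerabilityFunction(id, imt, imls, mean_loss_ratios, covs, distribution), usually followed by assigning a seed and, in the newer API, calling init(). Below is a minimal sketch of that pattern, assuming `scientific` refers to openquake.risklib.scientific as in the snippets on this page; the id 'V0' and all numeric values are illustrative only.

    # hedged sketch: constructing a vulnerability function as in these examples
    from openquake.risklib import scientific   # assumed module path

    vf = scientific.VulnerabilityFunction(
        'V0', 'PGA',                  # function id and intensity measure type
        [0.1, 0.3, 0.5, 1.0],         # intensity measure levels (IMLs)
        [0.05, 0.10, 0.15, 0.30],     # mean loss ratios, one per IML
        [0.3, 0.3, 0.2, 0.2],         # coefficients of variation, one per IML
        'LN')                         # lognormal distribution of the loss ratios
    vf.seed = 41                      # seed used for epsilon sampling
    vf.init()                         # newer API: build the internal interpolators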
Example #2
    def test_mean_based_with_partial_correlation(self):
        # This is a regression test. Data has not been checked
        vf = (
            scientific.VulnerabilityFunction(
                'SOME-TAXONOMY', 'PGA',
                [0.001, 0.2, 0.3, 0.5, 0.7],
                [0.01, 0.1, 0.2, 0.4, 0.8],
                [0.01, 0.02, 0.02, 0.01, 0.03]))
        gmvs = numpy.array([[10., 20., 30., 40., 50.],
                           [1., 2., 3., 4., 5.]])
        epsilons = scientific.make_epsilons(gmvs, seed=1, correlation=0.5)
        loss_matrix = vf.apply_to(gmvs, epsilons)

        losses_poes = scientific.event_based(loss_matrix[0], .25, 4)
        first_curve_integral = scientific.average_loss(losses_poes)

        self.assertAlmostEqual(0.48983614471, first_curve_integral)

        wf = workflows.ProbabilisticEventBased(
            'PGA', 'SOME-TAXONOMY',
            vulnerability_functions={self.loss_type: vf},
            investigation_time=50,
            risk_investigation_time=50,
            ses_per_logic_tree_path=200,
            number_of_logic_tree_samples=0,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=False
            )
        wf.riskmodel = mock.MagicMock()
        out = wf(self.loss_type, assets, gmvs, epsilons, [1, 2, 3, 4, 5])
        numpy.testing.assert_almost_equal(
            out.average_losses, [0.01987912, 0.01929152])
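scientific.event_based turns a row of sampled losses into a (losses, poes) curve and scientific.average_loss integrates it. The following is a hedged numpy sketch of such an integral, stated as an assumption rather than the exact discretisation used by average_loss.

    # hedged sketch: average loss as the area under a (losses, poes) curve
    import numpy

    def average_loss_sketch(losses, poes):
        losses = numpy.asarray(losses, float)
        poes = numpy.asarray(poes, float)
        return numpy.trapz(poes, losses)   # integral of the exceedance curve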
Example #3
    def test_beta_distribution(self):
        loss_ratio_curve = scientific.classical(
            scientific.VulnerabilityFunction('VF', 'PGA',
                                             [0.1, 0.2, 0.3, 0.45, 0.6],
                                             [0.05, 0.1, 0.2, 0.4, 0.8],
                                             [0.5, 0.4, 0.3, 0.2, 0.1], "BT"),
            self.hazard_imls,
            self.hazard_curve,
            steps=5)

        poes = [
            0.039334753367700, 0.039125428171600, 0.037674168943300,
            0.034759710983600, 0.031030531006800, 0.027179528786300,
            0.023629919279000, 0.020549508446100, 0.017953286405900,
            0.015789769371500, 0.013989999469800, 0.011228361585000,
            0.009252778235140, 0.007776981119440, 0.006618721902640,
            0.005678492205870, 0.004293209819490, 0.003423791350520,
            0.002851589502850, 0.002371116350380, 0.001901538687150,
            0.001145930527350, 0.000834074579570, 0.000755265952955,
            0.000655382394929, 0.000422046545856, 0.000266286103069,
            0.000124036890130, 3.28497166702e-05, 2.178664466e-06, 0.0
        ]

        numpy.testing.assert_allclose(self.loss_ratios, loss_ratio_curve[0])
        numpy.testing.assert_allclose(poes, loss_ratio_curve[1])

        asset_value = 2.
        self.assertAlmostEqual(
            0.264870863283,
            scientific.conditional_loss_ratio(self.loss_ratios, poes, 0.01) *
            asset_value)
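conditional_loss_ratio(loss_ratios, poes, poe) reads the loss ratio at a target probability of exceedance off the curve; multiplying by the asset value gives the conditional loss used above. A hedged sketch of that lookup via linear interpolation follows; the real function may treat flat segments and out-of-range poes differently.

    # hedged sketch: interpolate the loss ratio at a target PoE; the poes along
    # the curve are assumed non-increasing, so both arrays are reversed for interp
    import numpy

    def conditional_loss_ratio_sketch(loss_ratios, poes, target_poe):
        loss_ratios = numpy.asarray(loss_ratios, float)
        poes = numpy.asarray(poes, float)
        return numpy.interp(target_poe, poes[::-1], loss_ratios[::-1])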
Example #4
    def test_mean_based_with_partial_correlation(self):
        # This is a regression test. Data has not been checked
        vf = (scientific.VulnerabilityFunction('SOME-TAXONOMY', 'PGA',
                                               [0.001, 0.2, 0.3, 0.5, 0.7],
                                               [0.01, 0.1, 0.2, 0.4, 0.8],
                                               [0.01, 0.02, 0.02, 0.01, 0.03]))
        gmvs = numpy.array([[10., 20., 30., 40., 50.], [1., 2., 3., 4., 5.]])
        epsilons = scientific.make_epsilons(gmvs, seed=1, correlation=0.5)
        loss_matrix = vf.apply_to(gmvs, epsilons)

        losses_poes = scientific.event_based(loss_matrix[0], 120, 30, 4)
        first_curve_integral = scientific.average_loss(losses_poes)

        self.assertAlmostEqual(0.48983614471, first_curve_integral)

        wf = workflows.ProbabilisticEventBased(
            'PGA',
            'SOME-TAXONOMY',
            vulnerability_functions={self.loss_type: vf},
            investigation_time=50,
            risk_investigation_time=50,
            ses_per_logic_tree_path=200,
            number_of_logic_tree_samples=0,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=False)
        out = wf(self.loss_type, assets, gmvs, epsilons, [1, 2, 3, 4, 5])
        self.assert_similar(
            out.event_loss_table, {
                1: 15.332714802464356,
                2: 16.21582466071975,
                3: 15.646630129345354,
                4: 15.285164778325353,
                5: 15.860930792931873,
            })
Example #5
    def to_node(self):
        tset = self.tableset
        dvs_node = record.nodedict(tset.tableDiscreteVulnerabilitySet)
        dvf_node = record.nodedict(tset.tableDiscreteVulnerability)
        for (set_id, vf_id), group in groupby(
                tset.tableDiscreteVulnerabilityData,
            ['vulnerabilitySetID', 'vulnerabilityFunctionID']):
            dvf = dvf_node[set_id, vf_id]
            imt = dvs_node[(set_id, )].IML['IMT']
            coeffs = []
            ratios = []
            imls = []
            for row in group:
                imls.append(row['IML'])
                coeffs.append(row['coefficientsVariation'])
                ratios.append(row['lossRatio'])

            # check that we can instantiate a VulnerabilityFunction in risklib
            scientific.VulnerabilityFunction(imt, map(float, imls),
                                             map(float, ratios),
                                             map(float, coeffs))

            dvf.lossRatio.text = ' '.join(ratios)
            dvf.coefficientsVariation.text = ' '.join(coeffs)
            dvs_node[(set_id, )].append(dvf)
            dvs_node[(set_id, )].IML.text = ' '.join(imls)
        return Node('vulnerabilityModel', nodes=dvs_node.values())
Example #6
def _get_vulnerability_functions(vulnerability_file):
    """
    :param vulnerability_file:
        the pathname to a vulnerability file
    :returns:
        a dictionary {taxonomy: vulnerability_function}
    :raises:
        * `ValueError` if validation of any vulnerability function fails
    """
    vfs = {}
    for record in parsers.VulnerabilityModelParser(vulnerability_file):
        taxonomy = record['ID']
        imt = record['IMT']
        loss_ratios = record['lossRatio']
        covs = record['coefficientsVariation']
        distribution = record['probabilisticDistribution']

        if taxonomy in vfs:
            raise ValueError("Error creating vulnerability function for "
                             "taxonomy %s. A taxonomy can not "
                             "be associated with "
                             "different vulnerability functions" % taxonomy)
        try:
            vfs[taxonomy] = scientific.VulnerabilityFunction(
                imt, record['IML'], loss_ratios, covs, distribution)
        except ValueError as err:
            msg = "Invalid vulnerability function with ID '%s': %s" % (
                taxonomy, err)
            raise ValueError(msg)
Example #7
 def setUpClass(cls):
     cls.vf = scientific.VulnerabilityFunction(
         'RM', 'PGA', [0.02, 0.3, 0.5, 0.9, 1.2],
         [0.05, 0.1, 0.2, 0.4, 0.8],
         [0.0001, 0.0001, 0.0001, 0.0001, 0.0001])
     cls.vf.seed = 42
     cls.vf.init()
Example #8
    def test_compute_loss_ratio_curve(self):
        hazard_imls = [0.01, 0.08, 0.17, 0.26, 0.36, 0.55, 0.7]
        hazard_curve = [0.99, 0.96, 0.89, 0.82, 0.7, 0.4, 0.01]
        imls = [0.1, 0.2, 0.4, 0.6]
        covs = [0.5, 0.3, 0.2, 0.1]
        loss_ratios = [0.05, 0.08, 0.2, 0.4]

        vulnerability_function = scientific.VulnerabilityFunction(
            'VF', 'PGA', imls, loss_ratios, covs, "LN")

        loss_ratio_curve = scientific.classical(
            vulnerability_function, hazard_imls, hazard_curve, 2)

        expected_curve = [
            (0.0, 0.96), (0.025, 0.96),
            (0.05, 0.91), (0.065, 0.87),
            (0.08, 0.83), (0.14, 0.75),
            (0.2, 0.60), (0.3, 0.47),
            (0.4, 0.23), (0.7, 0.00),
            (1.0, 0.00)]

        actual_poes_interp = interp1d(loss_ratio_curve[0],
                                      loss_ratio_curve[1])

        for loss, poe in expected_curve:
            numpy.testing.assert_allclose(
                poe, actual_poes_interp(loss), atol=0.005)
Example #9
    def test_mean_loss(self):
        vf = scientific.VulnerabilityFunction(
            'VF1', 'PGA', imls=[0.1, 0.2, 0.3, 0.5, 0.7],
            mean_loss_ratios=[0.0035, 0.07, 0.14, 0.28, 0.56],
            covs=[0.1, 0.2, 0.3, 0.4, 0.5])
        vf.seed = 42
        vf.init()

        epsilons = [0.98982371, 0.2776809, -0.44858935, 0.96196624,
                    -0.82757864, 0.53465707, 1.22838619]
        imls = [0.280357, 0.443609, 0.241845, 0.506982, 0.459758,
                0.456199, 0.38077]
        mean = vf(imls, epsilons).mean()
        aaae(mean, 0.2318058254)

        # if you don't reorder the epsilons, the mean loss depends on
        # the order of the imls!
        reordered_imls = [0.443609, 0.280357, 0.241845, 0.506982, 0.459758,
                          0.456199, 0.38077]
        mean2 = vf(reordered_imls, epsilons).mean()
        aaae(mean2, 0.238145174018)
        self.assertGreater(abs(mean2 - mean), 0.005)

        # by reordering the epsilons the problem is solved
        reordered_epsilons = [0.2776809, 0.98982371, -0.44858935, 0.96196624,
                              -0.82757864, 0.53465707, 1.22838619]
        mean3 = vf(reordered_imls, reordered_epsilons).mean()
        aaae(mean3, mean)
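The reordering check above comes down to element-wise pairing: each sampled loss is a function of one (iml, epsilon) pair, so permuting the imls without permuting the epsilons changes which epsilon perturbs which mean loss. A toy illustration with made-up numbers (not the actual lognormal sampling) follows.

    # toy illustration: permuting only one of two paired arrays changes the mean
    import numpy

    means = numpy.array([0.1, 0.4])                 # illustrative mean loss ratios
    eps = numpy.array([1.0, -1.0])                  # illustrative epsilons
    print((means * (1 + 0.5 * eps)).mean())         # 0.175
    print((means[::-1] * (1 + 0.5 * eps)).mean())   # 0.325: same values, new pairing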
Example #10
    def test_sampling_lr_gmf_inside_range_vulnimls(self):
        """
        Sampling loss ratios (covs greater than zero), Ground Motion Fields
        IMLs inside range defined by Vulnerability function's imls.
        """

        vf = scientific.VulnerabilityFunction(
            'VF', 'PGA',
            [0.10, 0.30, 0.50, 1.00], [0.05, 0.10, 0.15, 0.30],
            [0.30, 0.30, 0.20, 0.20], "LN")

        gmf = (
            0.1576, 0.9706, 0.9572, 0.4854, 0.8003,
            0.1419, 0.4218, 0.9157, 0.7922, 0.9595,
        )

        expected_loss_ratios = numpy.array([
            0.0722, 0.4106, 0.1800, 0.1710, 0.2508,
            0.0395, 0.1145, 0.2883, 0.4734, 0.4885,
        ])

        vf.set_distribution(EPSILONS)
        ratios = vf._apply(gmf)
        numpy.testing.assert_allclose(expected_loss_ratios,
                                      ratios, atol=0.0, rtol=0.01)
Example #11
def vulnerability(vulnerability_file):
    """
    :param vulnerability_file:
        the pathname to a vulnerability file
    :returns:
        an assoc list between taxonomies and `RiskModel` instances
    :raises:
        * `ValueError` if validation of any vulnerability function fails
    """
    vfs = dict()

    for record in parsers.VulnerabilityModelParser(vulnerability_file):
        taxonomy = record['ID']
        imt = record['IMT']
        loss_ratios = record['lossRatio']
        covs = record['coefficientsVariation']
        distribution = record['probabilisticDistribution']

        if taxonomy in vfs:
            raise ValueError("Error creating vulnerability function for "
                             "taxonomy %s. A taxonomy can not "
                             "be associated with "
                             "different vulnerability functions" % (taxonomy))

        try:
            vfs[taxonomy] = RiskModel(
                imt,
                scientific.VulnerabilityFunction(record['IML'], loss_ratios,
                                                 covs, distribution), None)
        except ValueError as err:
            msg = ("Invalid vulnerability function with ID '%s': %s" %
                   (taxonomy, err))
            raise ValueError(msg)
Example #12
    def test_bcr_classical(self):
        vulnerability_function_rm = (scientific.VulnerabilityFunction(
            'RM', 'PGA', [0.1, 0.2, 0.3, 0.45, 0.6],
            [0.05, 0.1, 0.2, 0.4, 0.8], [0.5, 0.4, 0.3, 0.2, 0.1], "LN"))

        vulnerability_function_rf = (scientific.VulnerabilityFunction(
            'RF', 'PGA', [0.1, 0.2, 0.3, 0.45, 0.6],
            [0.035, 0.07, 0.14, 0.28, 0.56], [0.5, 0.4, 0.3, 0.2, 0.1], "LN"))

        asset_value = 2.
        retrofitting_cost = .1
        interest_rate = 0.05
        asset_life_expectancy = 40

        hazard_imls = [
            0.001, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5,
            0.55, 0.6, 0.7, 0.8, 0.9, 1.0
        ]
        poes = [
            0.039861266979, 0.039861266979, 0.0397287574803, 0.0296134266256,
            0.0198273287565, 0.0130622701615, 0.00865538795, 0.00589852059369,
            0.00406169858951, 0.00281172717953, 0.00199511741778,
            0.00135870597285, 0.000989667841574, 0.000757544444296,
            0.000272824002046, 0.0, 0.0, 0.
        ]

        original_loss_ratio_curve = scientific.classical(
            vulnerability_function_rm, hazard_imls, poes, steps=5)
        retrofitted_loss_ratio_curve = scientific.classical(
            vulnerability_function_rf, hazard_imls, poes, steps=5)

        eal_original = scientific.average_loss(original_loss_ratio_curve)
        eal_retrofitted = scientific.average_loss(retrofitted_loss_ratio_curve)

        bcr = scientific.bcr(eal_original, eal_retrofitted, interest_rate,
                             asset_life_expectancy, asset_value,
                             retrofitting_cost)

        self.assertAlmostEqual(0.009379,
                               eal_original * asset_value,
                               delta=0.0009)

        self.assertAlmostEqual(0.006586,
                               eal_retrofitted * asset_value,
                               delta=0.0009)

        self.assertAlmostEqual(0.483091, bcr, delta=0.009)
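The numbers in this test are consistent with the textbook benefit-cost ratio: the reduction in expected annual loss, converted to a present value over the asset life with continuous discounting, divided by the retrofitting cost. A hedged sketch of that formula follows; it is assumed, not verified, to be exactly what scientific.bcr implements, but it reproduces the expected 0.483091 to within the stated tolerance.

    # hedged sketch of the benefit-cost ratio:
    #   BCR = (EAL_orig - EAL_retro) * value * (1 - exp(-r * t)) / r / retrofit_cost
    import math

    def bcr_sketch(eal_original, eal_retrofitted, interest_rate,
                   asset_life_expectancy, asset_value, retrofitting_cost):
        annuity = (1 - math.exp(-interest_rate * asset_life_expectancy)) / interest_rate
        return ((eal_original - eal_retrofitted) * asset_value * annuity
                / retrofitting_cost)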
Example #13
 def test__evenly_spaced_loss_ratios_append_1(self):
     vf = scientific.VulnerabilityFunction('VF', self.IMT, [0, 1],
                                           [0.0, 0.5], [0, 0])
     vf.seed = 42
     vf.init()
     es_lrs = vf.mean_loss_ratios_with_steps(2)
     expected = [0.0, 0.25, 0.5, 0.75, 1.0]
     numpy.testing.assert_allclose(es_lrs, expected)
Example #14
 def test_large_covs(self):
     with self.assertRaises(ValueError) as ctx:
         scientific.VulnerabilityFunction('v1', 'PGA', [.1, .2, .3],
                                          [.05, .1, .2], [.1, .2, 3], 'BT')
     self.assertIn(
         'The coefficient of variation 3.0 > 2.0 does not satisfy'
         ' the requirement 0 < σ < sqrt[μ × (1 - μ)] in '
         '<VulnerabilityFunction(v1, PGA)>', str(ctx.exception))
Example #15
    def test_mean_based(self):
        epsilons = scientific.make_epsilons([gmf[0]], seed=1, correlation=0)
        vulnerability_function_rm = (
            scientific.VulnerabilityFunction(
                'RM', 'PGA',
                [0.001, 0.2, 0.3, 0.5, 0.7],
                [0.01, 0.1, 0.2, 0.4, 0.8],
                [0.0, 0.0, 0.0, 0.0, 0.0]))

        vulnerability_function_rc = (
            scientific.VulnerabilityFunction(
                'RC', 'PGA',
                [0.001, 0.2, 0.3, 0.5, 0.7],
                [0.0035, 0.07, 0.14, 0.28, 0.56],
                [0.0, 0.0, 0.0, 0.0, 0.0]))

        cr = 50  # curve resolution
        curve_rm_1 = scientific.event_based(
            vulnerability_function_rm.apply_to(
                [gmf[0]], epsilons)[0], 1, cr)

        curve_rm_2 = scientific.event_based(
            vulnerability_function_rm.apply_to(
                [gmf[1]], epsilons)[0], 1, cr)

        curve_rc = scientific.event_based(
            vulnerability_function_rc.apply_to(
                [gmf[2]], epsilons)[0], 1, cr)

        for i, curve_rm in enumerate([curve_rm_1, curve_rm_2]):

            conditional_loss = scientific.conditional_loss_ratio(
                curve_rm[0], curve_rm[1], 0.8)
            self.assertAlmostEqual([0.0490311, 0.0428061][i], conditional_loss)

            self.assertAlmostEqual(
                [0.070219108, 0.04549904][i],
                scientific.average_loss(curve_rm))

        conditional_loss = scientific.conditional_loss_ratio(
            curve_rc[0], curve_rc[1], 0.8)
        self.assertAlmostEqual(0.0152273, conditional_loss)

        self.assertAlmostEqual(
            0.0152393, scientific.average_loss(curve_rc))
Example #16
    def setUp(self):
        self.test_func = scientific.VulnerabilityFunction(
            self.ID, self.IMT, self.IMLS_GOOD, self.LOSS_RATIOS_GOOD,
            self.COVS_GOOD)

        epsilons = scientific.make_epsilons(numpy.zeros((1, 3)),
                                            seed=3,
                                            correlation=0)
        self.test_func.set_distribution(epsilons)
Example #17
    def test_strictly_increasing(self):
        vf = scientific.VulnerabilityFunction('VF', self.IMT, [0, 1, 2, 3],
                                              [0.0, 0.5, 0.5, 1], [0, 0, 3, 4])
        vfs = vf.strictly_increasing()

        numpy.testing.assert_allclose([0, 1, 3], vfs.imls)
        numpy.testing.assert_allclose([0, 0.5, 1], vfs.mean_loss_ratios)
        numpy.testing.assert_allclose([0, 0, 4], vfs.covs)
        self.assertEqual(vf.distribution_name, vfs.distribution_name)
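A hedged sketch of what strictly_increasing appears to do here: keep the first point and every subsequent point whose mean loss ratio actually grows, dropping the rest together with their imls and covs. This is consistent with the expected arrays above but is not necessarily the real implementation.

    # hedged sketch consistent with the test above
    import numpy

    def strictly_increasing_sketch(imls, mean_loss_ratios, covs):
        lrs = numpy.asarray(mean_loss_ratios, float)
        keep = numpy.concatenate([[True], numpy.diff(lrs) > 0])   # boolean mask
        return (numpy.asarray(imls, float)[keep], lrs[keep],
                numpy.asarray(covs, float)[keep])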
Example #18
    def test_lrem_lr_cov_special_cases(self):
        # Test LREM computation for points in a vuln curve where the loss ratio
        # > 0 and the CoV = 0, or the loss ratio = 0 and the CoV = 0.
        # If LR > 0 and CoV = 0, the PoE is 1 for values <= this LR and 0 for
        # values > this LR.
        # If LR = 0 and CoV = 0, the PoE is 0.
        curve = scientific.VulnerabilityFunction(
            self.ID,
            self.IMT,
            [0.1, 0.2, 0.3, 0.45, 0.6],  # IMLs
            [0.0, 0.1, 0.2, 0.4, 1.2],  # loss ratios
            [0.0, 0.0, 0.3, 0.2, 0.1],  # CoVs
            'LN')
        loss_ratios, lrem = curve.loss_ratio_exceedance_matrix(5)
        expected_lrem = numpy.array([
            [0.000, 1.000, 1.000, 1.000, 1.000],
            [0.000, 1.000, 1.000, 1.000, 1.000],
            [0.000, 1.000, 1.000, 1.000, 1.000],
            [0.000, 1.000, 1.000, 1.000, 1.000],
            [0.000, 1.000, 0.999, 1.000, 1.000],
            [0.000, 1.000, 0.987, 1.000, 1.000],
            [0.000, 0.000, 0.944, 1.000, 1.000],
            [0.000, 0.000, 0.857, 1.000, 1.000],
            [0.000, 0.000, 0.730, 1.000, 1.000],
            [0.000, 0.000, 0.584, 1.000, 1.000],
            [0.000, 0.000, 0.442, 1.000, 1.000],
            [0.000, 0.000, 0.221, 0.993, 1.000],
            [0.000, 0.000, 0.098, 0.956, 1.000],
            [0.000, 0.000, 0.040, 0.848, 1.000],
            [0.000, 0.000, 0.016, 0.667, 1.000],
            [0.000, 0.000, 0.006, 0.461, 1.000],
            [0.000, 0.000, 0.000, 0.036, 1.000],
            [0.000, 0.000, 0.000, 0.001, 1.000],
            [0.000, 0.000, 0.000, 0.000, 0.999],
            [0.000, 0.000, 0.000, 0.000, 0.917],
            [0.000, 0.000, 0.000, 0.000, 0.480],
        ])
        aaae(lrem, expected_lrem, decimal=3)

        expected_counts = numpy.matrix([[4, 4, 4, 4, 4], [4, 4, 4, 4, 4],
                                        [4, 4, 4, 4, 4], [4, 4, 4, 4, 4],
                                        [4, 4, 4, 4, 3], [4, 4, 4, 4, 3],
                                        [3, 3, 3, 3, 2], [3, 3, 3, 3, 2],
                                        [3, 3, 3, 2, 2], [3, 3, 2, 2, 2],
                                        [3, 3, 2, 2, 2], [3, 2, 2, 2, 1],
                                        [2, 2, 2, 2, 1], [2, 2, 2, 2, 1],
                                        [2, 2, 2, 1, 1], [2, 2, 1, 1, 1],
                                        [1, 1, 1, 1, 1], [1, 1, 1, 1, 1],
                                        [1, 1, 1, 1, 0], [1, 1, 1, 1, 0],
                                        [1, 1, 0, 0, 0]])
        # this is a test with curve_resolution=5, i.e. with ratios
        # [0.2, 0.4, 0.6, 0.8, 1.]; for each row in the lrem we count
        # how many entries are greater than or equal to each ratio
        b = scientific.CurveBuilder('structural',
                                    numpy.linspace(0.2, 1, 5),
                                    user_provided=True)
        aaae(b.build_counts(expected_lrem), expected_counts)
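A hedged sketch of the counting described in the comment above: for every row of the exceedance matrix, count how many entries are greater than or equal to each of the builder's ratios. This reproduces expected_counts, but CurveBuilder.build_counts itself may be implemented differently.

    # hedged sketch of the per-row counting against the curve ratios
    import numpy

    def build_counts_sketch(matrix, ratios):
        matrix = numpy.asarray(matrix, float)
        return numpy.array([[(row >= r).sum() for r in ratios] for row in matrix])

    # e.g. build_counts_sketch(expected_lrem, numpy.linspace(0.2, 1, 5))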
Example #19
    def test__evenly_spaced_loss_ratios_prepend_0(self):
        # We expect a 0.0 to be prepended to the LRs before spacing them

        vf = scientific.VulnerabilityFunction(
            'VF', self.IMT, [1, 2, 3, 4], [0.1, 0.2, 0.4, 1.2], [0, 0, 0, 0])
        es_lrs = vf.mean_loss_ratios_with_steps(5)
        expected = [0.0, 0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14, 0.16, 0.18,
                    0.2, 0.24000000000000002, 0.28, 0.32, 0.36, 0.4, 0.56,
                    0.72, 0.8799999999999999, 1.04, 1.2]
        numpy.testing.assert_allclose(es_lrs, expected)
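The expected list above follows a simple spacing rule, also visible in Examples #13 and #21: prepend 0.0 when the first mean loss ratio is not already zero, append 1.0 when the last one is below 1 (the "append_1" case of Example #13), then split every consecutive pair into `steps` equal sub-intervals. The hedged sketch below reproduces all three expected lists, though the real mean_loss_ratios_with_steps may be implemented differently.

    # hedged sketch of mean_loss_ratios_with_steps; reproduces Examples #13, #19, #21
    import numpy

    def loss_ratios_with_steps_sketch(mean_loss_ratios, steps):
        lrs = numpy.asarray(mean_loss_ratios, float)
        if lrs[0] != 0.0:                                   # prepend 0 when missing
            lrs = numpy.concatenate([[0.0], lrs])
        if lrs[-1] < 1.0:                                   # append 1 when below 1
            lrs = numpy.concatenate([lrs, [1.0]])
        pieces = [numpy.linspace(a, b, steps + 1)[:-1]      # drop right endpoints
                  for a, b in zip(lrs[:-1], lrs[1:])]
        return numpy.concatenate(pieces + [lrs[-1:]])       # keep the final value once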
Example #20
    def test_bin_width_from_imls(self):
        imls = [0.1, 0.2, 0.4, 0.6]
        covs = [0.5, 0.5, 0.5, 0.5]
        loss_ratios = [0.05, 0.08, 0.2, 0.4]

        vulnerability_function = scientific.VulnerabilityFunction(
            'VF', 'PGA', imls, loss_ratios, covs, "LN")

        expected_steps = [0.05, 0.15, 0.3, 0.5, 0.7]

        numpy.testing.assert_allclose(
            expected_steps, vulnerability_function.mean_imls())
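The expected_steps above are the midpoints between consecutive IMLs, extended by half a bin on each side. A hedged sketch consistent with those numbers follows; the real mean_imls may clip or handle edge cases differently.

    # hedged sketch of mean_imls(), consistent with expected_steps above
    import numpy

    def mean_imls_sketch(imls):
        imls = numpy.asarray(imls, float)
        mids = (imls[:-1] + imls[1:]) / 2.0               # midpoints between IMLs
        first = imls[0] - (imls[1] - imls[0]) / 2.0       # half a bin below the first
        last = imls[-1] + (imls[-1] - imls[-2]) / 2.0     # half a bin above the last
        return numpy.concatenate([[first], mids, [last]])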
Example #21
    def test__evenly_spaced_loss_ratios(self):
        vf = scientific.VulnerabilityFunction(
            'VF', self.IMT, [0, 1, 2, 3, 4], [0.0, 0.1, 0.2, 0.4, 1.2],
            [0, 0, 0, 0, 0])
        vf.seed = 42
        vf.init()

        es_lrs = vf.mean_loss_ratios_with_steps(5)
        expected = [0.0, 0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14, 0.16, 0.18,
                    0.2, 0.24000000000000002, 0.28, 0.32, 0.36, 0.4, 0.56,
                    0.72, 0.8799999999999999, 1.04, 1.2]
        numpy.testing.assert_allclose(es_lrs, expected)
Example #22
def get_vulnerability_functions_05(node, fname):
    """
    :param node:
        a vulnerabilityModel node
    :param fname:
        path of the vulnerability file
    :returns:
        a dictionary imt, taxonomy -> vulnerability function
    """
    # NB: the IMTs can be duplicated and with different levels, each
    # vulnerability function in a set will get its own levels
    taxonomies = set()
    vmodel = scientific.VulnerabilityModel(**node.attrib)
    # imt, taxonomy -> vulnerability function
    for vfun in node.getnodes('vulnerabilityFunction'):
        with context(fname, vfun):
            imt = vfun.imls['imt']
            imls = numpy.array(~vfun.imls)
            taxonomy = vfun['id']
        if taxonomy in taxonomies:
            raise InvalidFile(
                'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                (taxonomy, fname, vfun.lineno))
        if vfun['dist'] == 'PM':
            loss_ratios, probs = [], []
            for probabilities in vfun[1:]:
                loss_ratios.append(probabilities['lr'])
                probs.append(valid.probabilities(~probabilities))
            probs = numpy.array(probs)
            assert probs.shape == (len(loss_ratios), len(imls))
            vmodel[imt, taxonomy] = (
                scientific.VulnerabilityFunctionWithPMF(
                    taxonomy, imt, imls, numpy.array(loss_ratios),
                    probs))  # the seed will be set by readinput.get_risk_model
        else:
            with context(fname, vfun):
                loss_ratios = ~vfun.meanLRs
                coefficients = ~vfun.covLRs
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.meanLRs.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, '
                    'line %d' % (len(coefficients), len(imls), fname,
                                 vfun.covLRs.lineno))
            with context(fname, vfun):
                vmodel[imt, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt, imls, loss_ratios, coefficients,
                    vfun['dist'])
    return vmodel
Example #23
 def test_vuln_func_constructor_raises_on_invalid_lr_cov(self):
     # If a loss ratio is 0.0 and the corresponding CoV is > 0.0, we expect
     # a ValueError.
     with self.assertRaises(ValueError) as ar:
         vf = scientific.VulnerabilityFunction(
             self.ID, self.IMT, self.IMLS_GOOD,
             [0.0, 0.1, 0.2, 0.3, 0.4, 0.5],
             [0.001, 0.002, 0.003, 0.004, 0.005, 0.006])
         vf.init()
     expected_error = (
         'It is not valid to define a mean loss ratio = 0 with a '
         'corresponding coefficient of variation > 0')
     self.assertEqual(expected_error, str(ar.exception))
Example #24
def get_vulnerability_functions_04(fname):
    """
    Parse the vulnerability model in NRML 0.4 format.

    :param fname:
        path of the vulnerability file
    :returns:
        a dictionary imt, taxonomy -> vulnerability function + vset
    """
    categories = dict(assetCategory=set(),
                      lossCategory=set(),
                      vulnerabilitySetID=set())
    imts = set()
    taxonomies = set()
    vf_dict = {}  # imt, taxonomy -> vulnerability function
    for vset in nrml.read(fname).vulnerabilityModel:
        categories['assetCategory'].add(vset['assetCategory'])
        categories['lossCategory'].add(vset['lossCategory'])
        categories['vulnerabilitySetID'].add(vset['vulnerabilitySetID'])
        IML = vset.IML
        imt_str = IML['IMT']
        imls = ~IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    categories['id'] = '_'.join(sorted(categories['vulnerabilitySetID']))
    del categories['vulnerabilitySetID']
    return vf_dict, categories
Example #25
    def test_sampling_lr_gmf_less_than_first_vulnimls(self):
        # Sampling loss ratios (covs greater than zero), Ground Motion Fields
        # IMLs outside range defined by Vulnerability function's imls, some
        # values are less than the lower bound.
        vuln_function = scientific.VulnerabilityFunction(
            'VF', 'PGA',
            [0.10, 0.30, 0.50, 1.00], [0.05, 0.10, 0.15, 0.30],
            [0.30, 0.30, 0.20, 0.20], "LN")

        gmfs = (0.08, 0.9706, 0.9572, 0.4854, 0.8003,
                0.1419, 0.4218, 0.9157, 0.05, 0.9595)

        numpy.testing.assert_allclose(
            numpy.array([0., 0.4105595, 0.18002423, 0.17102685, 0.25077079,
                         0.03945861, 0.11454372, 0.28828653, 0., 0.48847448]),
            vuln_function(gmfs, EPSILONS), atol=0.0, rtol=0.01)
Example #26
def get_vulnerability_functions_04(node, fname):
    """
    :param node:
        a vulnerabilityModel node
    :param fname:
        path to the vulnerability file
    :returns:
        a dictionary imt, taxonomy -> vulnerability function
    """
    # NB: the fname below can contain non-ASCII characters
    logging.warn(u'Please upgrade %s to NRML 0.5', fname)
    # NB: the IMTs can be duplicated and with different levels, each
    # vulnerability function in a set will get its own levels
    imts = set()
    taxonomies = set()
    # imt, taxonomy -> vulnerability function
    vmodel = scientific.VulnerabilityModel(**node.attrib)
    for vset in node:
        imt_str = vset.IML['IMT']
        imls = ~vset.IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vmodel[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    return vmodel
Example #27
    def test_sampling_lr_gmfs_greater_than_last_vulnimls(self):
        # Sampling loss ratios (covs greater than zero), Ground Motion Fields
        # IMLs outside range defined by Vulnerability function's imls, some
        # values are greater than the upper bound.
        imls = [0.10, 0.30, 0.50, 1.00]
        loss_ratios = [0.05, 0.10, 0.15, 0.30]
        covs = [0.30, 0.30, 0.20, 0.20]
        vuln_function = scientific.VulnerabilityFunction(
            'VF', 'PGA', imls, loss_ratios, covs, "LN")

        gmfs = (1.1, 0.9706, 0.9572, 0.4854, 0.8003,
                0.1419, 0.4218, 0.9157, 1.05, 0.9595)

        numpy.testing.assert_allclose(
            numpy.array([0.3272, 0.4105, 0.1800, 0.1710, 0.2508,
                         0.0394, 0.1145, 0.2883, 0.5975, 0.4885]),
            vuln_function(gmfs, EPSILONS), atol=0.0, rtol=0.01)
Example #28
    def test_compute_lrem_using_beta_distribution(self):
        expected_lrem = [
            [1.0000000, 1.0000000, 1.0000000, 1.0000000, 1.0000000],
            [0.9895151, 0.9999409, 1.0000000, 1.0000000, 1.0000000],
            [0.9175720, 0.9981966, 0.9999997, 1.0000000, 1.0000000],
            [0.7764311, 0.9887521, 0.9999922, 1.0000000, 1.0000000],
            [0.6033381, 0.9633258, 0.9999305, 1.0000000, 1.0000000],
            [0.4364471, 0.9160514, 0.9996459, 1.0000000, 1.0000000],
            [0.2975979, 0.8460938, 0.9987356, 1.0000000, 1.0000000],
            [0.1931667, 0.7574557, 0.9964704, 1.0000000, 1.0000000],
            [0.1202530, 0.6571491, 0.9917729, 0.9999999, 1.0000000],
            [0.0722091, 0.5530379, 0.9832939, 0.9999997, 1.0000000],
            [0.0420056, 0.4521525, 0.9695756, 0.9999988, 1.0000000],
            [0.0130890, 0.2790107, 0.9213254, 0.9999887, 1.0000000],
            [0.0037081, 0.1564388, 0.8409617, 0.9999306, 1.0000000],
            [0.0009665, 0.0805799, 0.7311262, 0.9996882, 1.0000000],
            [0.0002335, 0.0384571, 0.6024948, 0.9988955, 1.0000000],
            [0.0000526, 0.0171150, 0.4696314, 0.9967629, 1.0000000],
            [0.0000022, 0.0027969, 0.2413923, 0.9820831, 1.0000000],
            [0.0000001, 0.0003598, 0.0998227, 0.9364072, 1.0000000],
            [0.0000000, 0.0000367, 0.0334502, 0.8381920, 0.9999995],
            [0.0000000, 0.0000030, 0.0091150, 0.6821293, 0.9999959],
            [0.0000000, 0.0000002, 0.0020162, 0.4909782, 0.9999755],
            [0.0000000, 0.0000000, 0.0000509, 0.1617086, 0.9995033],
            [0.0000000, 0.0000000, 0.0000005, 0.0256980, 0.9945488],
            [0.0000000, 0.0000000, 0.0000000, 0.0016231, 0.9633558],
            [0.0000000, 0.0000000, 0.0000000, 0.0000288, 0.8399534],
            [0.0000000, 0.0000000, 0.0000000, 0.0000001, 0.5409583],
            [0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.3413124],
            [0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.1589844],
            [0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0421052],
            [0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0027925],
            [0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000]
        ]

        vf = scientific.VulnerabilityFunction('VF', 'PGA', self.imls,
                                              self.mean_loss_ratios, self.covs,
                                              "BT")
        vf.seed = 42
        vf.init()
        loss_ratios = tuple(vf.mean_loss_ratios_with_steps(5))
        lrem = vf.loss_ratio_exceedance_matrix(loss_ratios)
        numpy.testing.assert_allclose(expected_lrem,
                                      lrem,
                                      rtol=0.0,
                                      atol=0.0005)
Example #29
    def test_sampling_lr_gmfs_greater_than_last_vulnimls(self):
        # Sampling loss ratios (covs greater than zero), Ground Motion Fields
        # IMLs outside range defined by Vulnerability function's imls, some
        # values are greater than the upper bound.
        imls = [0.10, 0.30, 0.50, 1.00]
        loss_ratios = [0.05, 0.10, 0.15, 0.30]
        covs = [0.30, 0.30, 0.20, 0.20]
        vuln_function = scientific.VulnerabilityFunction(
            'VF', 'PGA', imls, loss_ratios, covs, "LN")
        vuln_function.init()

        gmfs = (1.1, 0.9706, 0.9572, 0.4854, 0.8003,
                0.1419, 0.4218, 0.9157, 1.05, 0.9595)

        numpy.testing.assert_allclose(
            numpy.array([[0.236383, 0.175115, 0.297628, 0.181567, 0.344077,
                          0.091288, 0.165941, 0.300442, 0.207284, 0.25775]]),
            call(vuln_function, gmfs, EIDS, self.RNG),
            atol=0.0, rtol=0.01)
Example #30
    def test_insured_loss_mean_based(self):
        vf = scientific.VulnerabilityFunction('VF', 'PGA',
                                              [0.001, 0.2, 0.3, 0.5, 0.7],
                                              [0.01, 0.1, 0.2, 0.4, 0.8],
                                              [0.0, 0.0, 0.0, 0.0, 0.0])

        epsilons = scientific.make_epsilons(gmf[0:2], seed=1, correlation=0)
        loss_ratios = vf.apply_to(gmf[0:2], epsilons)

        values = [3000., 1000.]
        insured_limits = [1250., 40.]
        deductibles = [40., 13.]

        insured_average_losses = [
            scientific.average_loss(
                scientific.event_based(
                    scientific.insured_losses(lrs, deductibles[i] / values[i],
                                              insured_limits[i] / values[i]),
                    50, 50, 20)) for i, lrs in enumerate(loss_ratios)
        ]
        numpy.testing.assert_allclose([0.05667045, 0.02542965],
                                      insured_average_losses)

        wf = workflows.ProbabilisticEventBased(
            'PGA',
            'SOME-TAXONOMY',
            vulnerability_functions={self.loss_type: vf},
            investigation_time=50,
            risk_investigation_time=50,
            ses_per_logic_tree_path=200,
            number_of_logic_tree_samples=0,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=True)
        out = wf(self.loss_type, assets, gmf[0:2], epsilons, [1, 2, 3, 4, 5])
        self.assert_similar(out.event_loss_table, {
            1: 0.20314761658291458,
            2: 0,
            3: 0,
            4: 0,
            5: 0,
        })
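scientific.insured_losses is called above with the deductible and limit expressed as fractions of the asset value. Below is a hedged sketch of one common insured-loss convention (losses below the deductible pay nothing, losses above the limit are capped, and the deductible is always retained by the owner); the actual function may use a different piecewise rule.

    # hedged sketch of insured-loss clipping with deductible and limit as ratios
    import numpy

    def insured_losses_sketch(loss_ratios, deductible, insured_limit):
        lrs = numpy.asarray(loss_ratios, float)
        # below the deductible this yields 0, above the limit it yields
        # insured_limit - deductible, in between it yields loss - deductible
        return numpy.clip(lrs, deductible, insured_limit) - deductible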