Code example #1
File: bcr_test.py Project: julgp/oq-risklib
    def test_bcr_classical(self):
        vulnerability_function_rm = (
            scientific.VulnerabilityFunction(
                'PGA',
                [0.1, 0.2, 0.3, 0.45, 0.6],
                [0.05, 0.1, 0.2, 0.4, 0.8],
                [0.5, 0.4, 0.3, 0.2, 0.1], "LN"))

        vulnerability_function_rf = (
            scientific.VulnerabilityFunction(
                'PGA',
                [0.1, 0.2, 0.3, 0.45, 0.6],
                [0.035, 0.07, 0.14, 0.28, 0.56],
                [0.5, 0.4, 0.3, 0.2, 0.1], "LN"))

        asset_value = 2.
        retrofitting_cost = .1
        interest_rate = 0.05
        asset_life_expectancy = 40

        hazard = [
            (0.001, 0.0398612669790014), (0.01, 0.0398612669790014),
            (0.05, 0.0397287574802989), (0.1, 0.0296134266256125),
            (0.15, 0.0198273287564916), (0.2, 0.0130622701614519),
            (0.25, 0.00865538795000043), (0.3, 0.00589852059368967),
            (0.35, 0.00406169858951178), (0.4, 0.00281172717952682),
            (0.45, 0.00199511741777669), (0.5, 0.00135870597284571),
            (0.55, 0.000989667841573727), (0.6, 0.000757544444296432),
            (0.7, 0.000272824002045979), (0.8, 0.0),
            (0.9, 0.0), (1.0, 0.0)]

        original_loss_ratio_curve = scientific.classical(
            vulnerability_function_rm,
            hazard,
            steps=5)
        retrofitted_loss_ratio_curve = scientific.classical(
            vulnerability_function_rf,
            hazard,
            steps=5)

        eal_original = scientific.average_loss(*original_loss_ratio_curve)
        eal_retrofitted = scientific.average_loss(
            *retrofitted_loss_ratio_curve)

        bcr = scientific.bcr(
            eal_original, eal_retrofitted,
            interest_rate,
            asset_life_expectancy,
            asset_value,
            retrofitting_cost)

        self.assertAlmostEqual(0.009379,
                               eal_original * asset_value,
                               delta=0.0009)

        self.assertAlmostEqual(0.006586,
                               eal_retrofitted * asset_value,
                               delta=0.0009)

        self.assertAlmostEqual(0.483091, bcr, delta=0.009)
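For context, the bcr value asserted above follows the standard benefit-cost ratio formula: the reduction in expected annual loss, converted to a present value over the asset's life at the given interest rate, divided by the retrofitting cost. A minimal stand-alone sketch (bcr_sketch is a hypothetical name, not the risklib API):

import numpy

def bcr_sketch(eal_original, eal_retrofitted, interest_rate,
               asset_life_expectancy, asset_value, retrofitting_cost):
    # Present value of the annual benefit (the EAL reduction in
    # absolute terms), discounted continuously over the asset's life.
    benefit = ((eal_original - eal_retrofitted) * asset_value
               * (1 - numpy.exp(-interest_rate * asset_life_expectancy))
               / interest_rate)
    return benefit / retrofitting_cost

# Plugging in the expected values from the test above:
print(bcr_sketch(0.009379 / 2., 0.006586 / 2., 0.05, 40, 2., .1))
# ~0.483, consistent with the asserted 0.483091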
Code example #2
File: core.py Project: kenxshao/oq-engine
def do_classical_bcr(loss_type, units, containers, params, profile):
    for unit_orig, unit_retro in utils.pairwise(units):
        with profile('getting hazard'):
            assets, hazard_curves = unit_orig.getter()
            _, hazard_curves_retrofitted = unit_retro.getter()

        with profile('computing bcr'):
            original_loss_curves = unit_orig.calc(hazard_curves)
            retrofitted_loss_curves = unit_retro.calc(
                hazard_curves_retrofitted)

            eal_original = [
                scientific.average_loss(losses, poes)
                for losses, poes in original_loss_curves]

            eal_retrofitted = [
                scientific.average_loss(losses, poes)
                for losses, poes in retrofitted_loss_curves]

            bcr_results = [
                scientific.bcr(
                    eal_original[i], eal_retrofitted[i],
                    params.interest_rate, params.asset_life_expectancy,
                    asset.value(loss_type), asset.retrofitted(loss_type))
                for i, asset in enumerate(assets)]

        with logs.tracing('writing results'):
            containers.write(
                assets, zip(eal_original, eal_retrofitted, bcr_results),
                output_type="bcr_distribution",
                loss_type=loss_type,
                hazard_output_id=unit_orig.getter.hazard_output.id)
Code example #3
def classical_risk(riskinput, riskmodel, param, monitor):
    """
    Compute and return the average losses for each asset.

    :param riskinput:
        a :class:`openquake.risklib.riskinput.RiskInput` object
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
    :param param:
        dictionary of extra parameters
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    """
    ins = param['insured_losses']
    result = dict(loss_curves=[], stat_curves=[])
    all_outputs = list(riskmodel.gen_outputs(riskinput, monitor))
    for outputs in all_outputs:
        r = outputs.r
        outputs.average_losses = AccumDict(accum=[])  # l -> array
        for l, (loss_curves, insured_curves) in enumerate(outputs):
            for i, asset in enumerate(outputs.assets):
                aid = asset.ordinal
                avg = scientific.average_loss(loss_curves[i])
                outputs.average_losses[l].append(avg)
                lcurve = (loss_curves[i, 0], loss_curves[i, 1], avg)
                if ins:
                    lcurve += (
                        insured_curves[i, 0], insured_curves[i, 1],
                        scientific.average_loss(insured_curves[i]))
                else:
                    lcurve += (None, None, None)
                result['loss_curves'].append((l, r, aid, lcurve))

    # compute statistics
    rlzs = riskinput.rlzs
    if len(rlzs) > 1 and param['stats']:
        w = param['weights']
        statnames, stats = zip(*param['stats'])
        l_idxs = range(len(riskmodel.lti))
        for assets, rows in groupby(
                all_outputs, lambda o: tuple(o.assets)).items():
            weights = [w[row.r] for row in rows]
            row = rows[0]
            for l in l_idxs:
                for i, asset in enumerate(assets):
                    avgs = numpy.array([r.average_losses[l][i] for r in rows])
                    avg_stats = compute_stats(avgs, stats, weights)
                    # row is indexed by the loss type index l and row[l]
                    # is a pair loss_curves, insured_loss_curves
                    # loss_curves[i, 0] are the i-th losses,
                    # loss_curves[i, 1] are the i-th poes
                    losses = row[l][0][i, 0]
                    poes_stats = compute_stats(
                        numpy.array([row[l][0][i, 1] for row in rows]),
                        stats, weights)
                    result['stat_curves'].append(
                        (l, asset.ordinal, losses, poes_stats, avg_stats))
    return result
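The statistics block above reduces per-realization average losses with compute_stats and the realization weights. For intuition, the mean statistic boils down to a weighted average; a hypothetical stand-in (the real openquake.baselib helper also covers quantiles):

import numpy

def weighted_mean(values, weights):
    # Weighted average of a per-asset quantity across realizations.
    values = numpy.asarray(values, float)
    weights = numpy.asarray(weights, float)
    return (values * weights).sum() / weights.sum()

# e.g. three realizations with weights 0.5, 0.25, 0.25:
print(weighted_mean([0.10, 0.12, 0.08], [0.5, 0.25, 0.25]))  # 0.1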
Code example #4
File: workflows.py Project: julgp/oq-risklib
    def __call__(self, loss_type, assets, ground_motion_values, epsilons,
                 event_ids):
        """
        :param str loss_type: the loss type considered

        :param assets:
           assets is an iterator over
           :class:`openquake.risklib.workflows.Asset` instances

        :param ground_motion_values:
           a numpy array with ground_motion_values of shape N x R

        :param epsilons:
           a numpy array with stochastic values of shape N x R

        :param event_ids:
           a numpy array of R event IDs (integers)

        :returns:
            a
            :class:`openquake.risklib.workflows.ProbabilisticEventBased.Output`
            instance.
        """
        loss_matrix = self.risk_functions[loss_type].apply_to(
            ground_motion_values, epsilons)

        curves = self.curves(loss_matrix)
        average_losses = numpy.array([scientific.average_loss(losses, poes)
                                      for losses, poes in curves])
        stddev_losses = numpy.std(loss_matrix, axis=1)
        values = utils.numpy_map(lambda a: a.value(loss_type), assets)
        maps = self.maps(curves)
        elt = self.event_loss(loss_matrix.transpose() * values, event_ids)

        if self.insured_losses and loss_type != 'fatalities':
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]
            insured_loss_matrix = utils.numpy_map(
                scientific.insured_losses, loss_matrix, deductibles, limits)
            insured_curves = self.curves(insured_loss_matrix)
            average_insured_losses = [
                scientific.average_loss(losses, poes)
                for losses, poes in insured_curves]
            stddev_insured_losses = numpy.std(insured_loss_matrix, axis=1)
        else:
            insured_curves = None
            average_insured_losses = None
            stddev_insured_losses = None

        return self.Output(
            assets, loss_matrix if self.return_loss_matrix else None,
            curves, average_losses, stddev_losses,
            insured_curves, average_insured_losses, stddev_insured_losses,
            maps, elt)
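The insured branch above maps ground-up loss ratios through scientific.insured_losses with per-asset deductibles and limits. Conceptually this clips each loss to the insured band; a rough sketch (illustrative only, the risklib function may treat edge cases differently):

import numpy

def insured_losses_sketch(losses, deductible, limit):
    # No payout below the deductible; the payout saturates at
    # limit - deductible once the loss exceeds the limit.
    losses = numpy.asarray(losses, float)
    return numpy.clip(losses, deductible, limit) - deductible

print(insured_losses_sketch([0.01, 0.05, 0.30], 0.02, 0.10))
# -> [0.   0.03 0.08]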
Code example #5
File: bcr_test.py Project: dynaryu/oq-risklib
    def test_bcr_classical(self):
        vulnerability_function_rm = (
            scientific.VulnerabilityFunction(
                'RM', 'PGA',
                [0.1, 0.2, 0.3, 0.45, 0.6],
                [0.05, 0.1, 0.2, 0.4, 0.8],
                [0.5, 0.4, 0.3, 0.2, 0.1], "LN"))

        vulnerability_function_rf = (
            scientific.VulnerabilityFunction(
                'RF', 'PGA',
                [0.1, 0.2, 0.3, 0.45, 0.6],
                [0.035, 0.07, 0.14, 0.28, 0.56],
                [0.5, 0.4, 0.3, 0.2, 0.1], "LN"))

        asset_value = 2.
        retrofitting_cost = .1
        interest_rate = 0.05
        asset_life_expectancy = 40

        hazard_imls = [0.001, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4,
                       0.45, 0.5, 0.55, 0.6, 0.7, 0.8, 0.9, 1.0]
        poes = [0.039861266979, 0.039861266979, 0.0397287574803,
                0.0296134266256, 0.0198273287565, 0.0130622701615,
                0.00865538795, 0.00589852059369, 0.00406169858951,
                0.00281172717953, 0.00199511741778, 0.00135870597285,
                0.000989667841574, 0.000757544444296, 0.000272824002046,
                0.0, 0.0, 0.]

        original_loss_ratio_curve = scientific.classical(
            vulnerability_function_rm, hazard_imls, poes, steps=5)
        retrofitted_loss_ratio_curve = scientific.classical(
            vulnerability_function_rf, hazard_imls, poes, steps=5)

        eal_original = scientific.average_loss(original_loss_ratio_curve)
        eal_retrofitted = scientific.average_loss(retrofitted_loss_ratio_curve)

        bcr = scientific.bcr(
            eal_original, eal_retrofitted,
            interest_rate,
            asset_life_expectancy,
            asset_value,
            retrofitting_cost)

        self.assertAlmostEqual(0.009379,
                               eal_original * asset_value,
                               delta=0.0009)

        self.assertAlmostEqual(0.006586,
                               eal_retrofitted * asset_value,
                               delta=0.0009)

        self.assertAlmostEqual(0.483091, bcr, delta=0.009)
Code example #6
File: risk.py Project: panagop/oq-engine
def export_rcurves_rlzs(ekey, dstore):
    assetcol = dstore['assetcol']
    aref = dstore['asset_refs'].value
    rcurves = dstore[ekey[0]]
    [loss_ratios] = dstore['loss_ratios']
    fnames = []
    writercls = (risk_writers.LossCurveGeoJSONWriter
                 if ekey[0] == 'geojson' else risk_writers.LossCurveXMLWriter)
    for writer, (ltype, poe, r, ins) in _gen_writers(dstore, writercls,
                                                     ekey[0]):
        if ltype not in loss_ratios.dtype.names:
            continue  # ignore loss type
        the_poes = rcurves[ltype][:, r, ins]
        curves = []
        for aid, ass in enumerate(assetcol):
            loc = Location(*ass.location)
            losses = loss_ratios[ltype] * ass.value(ltype)
            poes = the_poes[aid]
            avg = scientific.average_loss([losses, poes])
            curve = LossCurve(loc, aref[ass.idx], poes, losses,
                              loss_ratios[ltype], avg, None)
            curves.append(curve)
        writer.serialize(curves)
        fnames.append(writer._dest)
    return sorted(fnames)
Code example #7
File: event_based_test.py Project: rcgee/oq-risklib
    def test_mean_based_with_partial_correlation(self):
        # This is a regression test. Data has not been checked
        vf = (
            scientific.VulnerabilityFunction(
                'SOME-TAXONOMY', 'PGA',
                [0.001, 0.2, 0.3, 0.5, 0.7],
                [0.01, 0.1, 0.2, 0.4, 0.8],
                [0.01, 0.02, 0.02, 0.01, 0.03]))
        gmvs = numpy.array([[10., 20., 30., 40., 50.],
                           [1., 2., 3., 4., 5.]])
        epsilons = scientific.make_epsilons(gmvs, seed=1, correlation=0.5)
        loss_matrix = vf.apply_to(gmvs, epsilons)

        losses_poes = scientific.event_based(loss_matrix[0], .25, 4)
        first_curve_integral = scientific.average_loss(losses_poes)

        self.assertAlmostEqual(0.48983614471, first_curve_integral)

        wf = workflows.ProbabilisticEventBased(
            'PGA', 'SOME-TAXONOMY',
            vulnerability_functions={self.loss_type: vf},
            investigation_time=50,
            risk_investigation_time=50,
            ses_per_logic_tree_path=200,
            number_of_logic_tree_samples=0,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=False
            )
        wf.riskmodel = mock.MagicMock()
        out = wf(self.loss_type, assets, gmvs, epsilons, [1, 2, 3, 4, 5])
        numpy.testing.assert_almost_equal(
            out.average_losses, [0.01987912, 0.01929152])
Code example #8
File: risk.py Project: gem/oq-risklib
def export_rcurves_rlzs(ekey, dstore):
    assetcol = dstore['assetcol/array'].value
    aref = dstore['asset_refs'].value
    rcurves = dstore[ekey[0]]
    [loss_ratios] = dstore['loss_ratios']
    fnames = []
    writercls = (risk_writers.LossCurveGeoJSONWriter
                 if ekey[0] == 'geojson' else
                 risk_writers.LossCurveXMLWriter)
    for writer, (ltype, poe, r, ins) in _gen_writers(
            dstore, writercls, ekey[0]):
        if ltype not in loss_ratios.dtype.names:
            continue  # ignore loss type
        array = rcurves[ltype][:, r, ins]
        curves = []
        for ass, poes in zip(assetcol, array):
            loc = Location(ass['lon'], ass['lat'])
            losses = loss_ratios[ltype] * ass[ltype]
            avg = scientific.average_loss((losses, poes))
            curve = LossCurve(loc, aref[ass['idx']], poes,
                              losses, loss_ratios[ltype], avg, None)
            curves.append(curve)
        writer.serialize(curves)
        fnames.append(writer._dest)
    return sorted(fnames)
Code example #9
File: risk.py Project: rcgee/oq-risklib
def export_rcurves_rlzs(ekey, dstore):
    assetcol = dstore['assetcol']
    sitemesh = dstore['sitemesh']
    rcurves = dstore[ekey[0]]
    cbuilders = dstore['riskmodel'].curve_builders
    fnames = []
    writercls = (risk_writers.LossCurveGeoJSONWriter
                 if ekey[0] == 'geojson' else risk_writers.LossCurveXMLWriter)
    for writer, (ltype, r, ins) in _gen_writers(dstore, writercls, ekey[0]):
        for cb in cbuilders:
            if cb.user_provided and cb.loss_type == ltype:
                loss_ratios = cb.ratios
                break
        else:  # no break, ignore loss type
            continue
        array = rcurves[ltype][:, r, ins]
        curves = []
        for ass, poes in zip(assetcol, array):
            loc = Location(sitemesh[ass['site_id']])
            losses = cb.ratios * ass[cb.loss_type]
            avg = scientific.average_loss((losses, poes))
            curve = LossCurve(loc, ass['asset_ref'], poes, losses, loss_ratios,
                              avg, None)
            curves.append(curve)
        writer.serialize(curves)
        fnames.append(writer._dest)
    return sorted(fnames)
Code example #10
File: event_based_test.py Project: julgp/oq-risklib
    def test_mean_based_with_partial_correlation(self):
        # This is a regression test. Data has not been checked
        vf = (
            scientific.VulnerabilityFunction(
                'PGA',
                [0.001, 0.2, 0.3, 0.5, 0.7],
                [0.01, 0.1, 0.2, 0.4, 0.8],
                [0.01, 0.02, 0.02, 0.01, 0.03]))
        gmvs = numpy.array([[10., 20., 30., 40., 50.],
                           [1., 2., 3., 4., 5.]])
        epsilons = scientific.make_epsilons(gmvs, seed=1, correlation=0.5)
        loss_matrix = vf.apply_to(gmvs, epsilons)

        losses, poes = scientific.event_based(loss_matrix[0], 120, 30, 4)
        first_curve_integral = scientific.average_loss(losses, poes)

        self.assertAlmostEqual(0.48983614471, first_curve_integral)

        wf = workflows.ProbabilisticEventBased(
            vulnerability_functions={self.loss_type: vf},
            time_span=50,
            tses=10000,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=False
            )
        out = wf(self.loss_type, assets, gmvs, epsilons, [1, 2, 3, 4, 5])
        self.assert_similar(
            out.event_loss_table,
            {1: 15.332714802464356,
             2: 16.21582466071975,
             3: 15.646630129345354,
             4: 15.285164778325353,
             5: 15.860930792931873,
             })
Code example #11
File: writers.py Project: kenxshao/oq-engine
def loss_curve(loss_type, loss_curve_id, assets, curves):
    """
    Stores and returns a :class:`openquake.engine.db.models.LossCurveData`
    for each asset, where the data come from `curves` and the
    :class:`openquake.engine.db.models.LossCurve` output container is
    identified by `loss_curve_id`.

    :param str loss_type:
        The loss type of the curves.
    :param int loss_curve_id:
        The ID of the output container.
    :param assets:
        An iterable of :class:`openquake.engine.db.models.ExposureData`
        instances.
    :param curves:
        An iterable of (loss_ratios, poes) pairs, one per asset.
    """

    for asset, (losses, poes) in itertools.izip(assets, curves):
        models.LossCurveData.objects.create(
            loss_curve_id=loss_curve_id,
            asset_ref=asset.asset_ref,
            location=asset.site,
            poes=poes,
            loss_ratios=losses,
            asset_value=asset.value(loss_type),
            average_loss_ratio=scientific.average_loss(losses, poes))
Code example #12
def build_agg_curve(lr_list, insured_losses, ses_ratio, curve_resolution,
                    monitor):
    """
    Build the aggregate loss curve in parallel for each loss type
    and realization pair.

    :param lr_list:
        a list of triples `(l, r, data)` where `l` is a loss type string,
        `r` is the realization index and `data` is an array of pairs
        `(rupture_id, loss)` where loss is an array with two values
    :param insured_losses:
        job.ini configuration parameter
    :param ses_ratio:
        a ratio obtained from ses_per_logic_tree_path
    :param curve_resolution:
        the number of discretization steps for the loss curve
    :param monitor:
        a Monitor instance
    :returns:
        a dictionary (r, l, i) -> (losses, poes, avg)
    """
    result = {}
    for l, r, data in lr_list:
        if len(data) == 0:  # realization with no losses
            continue
        for i in range(insured_losses + 1):  # insured_losses
            the_losses = numpy.array(
                [loss[i] for _rupid, loss in data], F32)
            losses, poes = scientific.event_based(
                the_losses, ses_ratio, curve_resolution)
            avg = scientific.average_loss((losses, poes))
            result[l, r, i] = (losses, poes, avg)
    return result
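The docstring above pins down the shape of the returned dictionary: (l, r, i) -> (losses, poes, avg), where i distinguishes ground-up (0) from insured (1) curves. A hypothetical consumer over toy data of that shape:

import numpy

result = {  # made-up data in the documented shape
    ('structural', 0, 0): (numpy.array([0., 10., 20.]),
                           numpy.array([0.1, 0.01, 0.001]), 0.5),
}
for (l, r, i), (losses, poes, avg) in sorted(result.items()):
    kind = 'insured' if i else 'ground-up'
    print('loss_type=%s rlz=%s %s: avg=%.3f' % (l, r, kind, avg))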
Code example #13
    def test_mean_based_with_partial_correlation(self):
        # This is a regression test. Data has not been checked
        vf = (scientific.VulnerabilityFunction('SOME-TAXONOMY', 'PGA',
                                               [0.001, 0.2, 0.3, 0.5, 0.7],
                                               [0.01, 0.1, 0.2, 0.4, 0.8],
                                               [0.01, 0.02, 0.02, 0.01, 0.03]))
        gmvs = numpy.array([[10., 20., 30., 40., 50.], [1., 2., 3., 4., 5.]])
        epsilons = scientific.make_epsilons(gmvs, seed=1, correlation=0.5)
        loss_matrix = vf.apply_to(gmvs, epsilons)

        losses_poes = scientific.event_based(loss_matrix[0], 120, 30, 4)
        first_curve_integral = scientific.average_loss(losses_poes)

        self.assertAlmostEqual(0.48983614471, first_curve_integral)

        wf = workflows.ProbabilisticEventBased(
            'PGA',
            'SOME-TAXONOMY',
            vulnerability_functions={self.loss_type: vf},
            investigation_time=50,
            risk_investigation_time=50,
            ses_per_logic_tree_path=200,
            number_of_logic_tree_samples=0,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=False)
        out = wf(self.loss_type, assets, gmvs, epsilons, [1, 2, 3, 4, 5])
        self.assert_similar(
            out.event_loss_table, {
                1: 15.332714802464356,
                2: 16.21582466071975,
                3: 15.646630129345354,
                4: 15.285164778325353,
                5: 15.860930792931873,
            })
Code example #14
    def build_loss_curves(self, elass, loss_type, i):
        """
        Build loss curves per asset from a set of losses with length given by
        the parameter loss_curve_resolution.

        :param elass: a dict (loss_type, asset_id) -> (tag, loss, ins_loss)
        :param loss_type: the loss_type
        :param i: 1 for loss curves or 2 for insured losses
        :returns: an array of loss curves, one for each asset
        """
        oq = self.oqparam
        C = oq.loss_curve_resolution
        lcs = []
        for asset in self.assets:
            all_losses = [loss[i] for loss in elass[loss_type, asset.id]]
            if all_losses:
                losses, poes = scientific.event_based(
                    all_losses, tses=oq.tses,
                    time_span=oq.risk_investigation_time or
                    oq.investigation_time, curve_resolution=C)
                avg = scientific.average_loss((losses, poes))
            else:
                losses, poes = numpy.zeros(C), numpy.zeros(C)
                avg = 0
            lcs.append((losses, poes, avg))
        return numpy.array(lcs, self.loss_curve_dt)
Code example #15
    def test_insured_loss_mean_based(self):
        vf = scientific.VulnerabilityFunction(
            [0.001, 0.2, 0.3, 0.5, 0.7],
            [0.01, 0.1, 0.2, 0.4, 0.8],
            [0.0, 0.0, 0.0, 0.0, 0.0],
            "LN")

        loss_ratios = scientific.vulnerability_function_applier(
            vf, gmf[0:2])

        values = [3000, 1000]
        insured_limits = [1250., 40.]
        deductibles = [40, 13]

        insured_average_losses = [
            scientific.average_loss(*scientific.event_based(
                scientific.insured_losses(
                    loss_ratios,
                    deductibles[i] / values[i], insured_limits[i] / values[i]),
                50, 50, 20))
            for i, loss_ratios in enumerate(loss_ratios)]

        numpy.testing.assert_allclose(
            [207.86489132 / 3000,   38.07815797 / 1000],
            insured_average_losses)
Code example #16
File: bcr_test.py Project: ruthali/oq-risklib
    def test_bcr_classical(self):
        vulnerability_function_rm = (scientific.VulnerabilityFunction(
            'RM', 'PGA', [0.1, 0.2, 0.3, 0.45, 0.6],
            [0.05, 0.1, 0.2, 0.4, 0.8], [0.5, 0.4, 0.3, 0.2, 0.1], "LN"))

        vulnerability_function_rf = (scientific.VulnerabilityFunction(
            'RF', 'PGA', [0.1, 0.2, 0.3, 0.45, 0.6],
            [0.035, 0.07, 0.14, 0.28, 0.56], [0.5, 0.4, 0.3, 0.2, 0.1], "LN"))

        asset_value = 2.
        retrofitting_cost = .1
        interest_rate = 0.05
        asset_life_expectancy = 40

        hazard_imls = [
            0.001, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5,
            0.55, 0.6, 0.7, 0.8, 0.9, 1.0
        ]
        poes = [
            0.039861266979, 0.039861266979, 0.0397287574803, 0.0296134266256,
            0.0198273287565, 0.0130622701615, 0.00865538795, 0.00589852059369,
            0.00406169858951, 0.00281172717953, 0.00199511741778,
            0.00135870597285, 0.000989667841574, 0.000757544444296,
            0.000272824002046, 0.0, 0.0, 0.
        ]

        original_loss_ratio_curve = scientific.classical(
            vulnerability_function_rm, hazard_imls, poes, steps=5)
        retrofitted_loss_ratio_curve = scientific.classical(
            vulnerability_function_rf, hazard_imls, poes, steps=5)

        eal_original = scientific.average_loss(original_loss_ratio_curve)
        eal_retrofitted = scientific.average_loss(retrofitted_loss_ratio_curve)

        bcr = scientific.bcr(eal_original, eal_retrofitted, interest_rate,
                             asset_life_expectancy, asset_value,
                             retrofitting_cost)

        self.assertAlmostEqual(0.009379,
                               eal_original * asset_value,
                               delta=0.0009)

        self.assertAlmostEqual(0.006586,
                               eal_retrofitted * asset_value,
                               delta=0.0009)

        self.assertAlmostEqual(0.483091, bcr, delta=0.009)
Code example #17
File: event_based_risk.py Project: rcgee/oq-engine
def build_agg_curve(lr_data, insured_losses, ses_ratio, curve_resolution, L,
                    monitor):
    """
    Build the aggregate loss curve in parallel for each loss type
    and realization pair.

    :param lr_data:
        a list of triples `(l, r, data)` where `l` is the loss type index,
        `r` is the realization index and `data` is an array of kind
        `(rupture_id, loss)` or `(rupture_id, loss, loss_ins)`
    :param bool insured_losses:
        job.ini configuration parameter
    :param ses_ratio:
        a ratio obtained from ses_per_logic_tree_path
    :param curve_resolution:
        the number of discretization steps for the loss curve
    :param L:
        the number of loss types
    :param monitor:
        a Monitor instance
    :returns:
        a dictionary (r, l, i) -> (losses, poes, avg)
    """
    result = {}
    for l, r, data in lr_data:
        if len(data) == 0:  # realization with no losses
            continue
        if insured_losses:
            gloss = data['loss'][:, 0]
            iloss = data['loss'][:, 1]
        else:
            gloss = data['loss']
        losses, poes = scientific.event_based(
            gloss, ses_ratio, curve_resolution)
        avg = scientific.average_loss((losses, poes))
        result[l, r, 'losses'] = losses
        result[l, r, 'poes'] = poes
        result[l, r, 'avg'] = avg
        if insured_losses:
            losses_ins, poes_ins = scientific.event_based(
                iloss, ses_ratio, curve_resolution)
            avg_ins = scientific.average_loss((losses_ins, poes_ins))
            result[l, r, 'losses_ins'] = losses_ins
            result[l, r, 'poes_ins'] = poes_ins
            result[l, r, 'avg_ins'] = avg_ins
    return result
Code example #18
def build_agg_curve(lr_data, insured_losses, ses_ratio, curve_resolution, L,
                    monitor):
    """
    Build the aggregate loss curve in parallel for each loss type
    and realization pair.

    :param lr_data:
        a list of triples `(l, r, data)` where `l` is the loss type index,
        `r` is the realization index and `data` is an array of kind
        `(rupture_id, loss)` or `(rupture_id, loss, loss_ins)`
    :param bool insured_losses:
        job.ini configuration parameter
    :param ses_ratio:
        a ratio obtained from ses_per_logic_tree_path
    :param curve_resolution:
        the number of discretization steps for the loss curve
    :param L:
        the number of loss types
    :param monitor:
        a Monitor instance
    :returns:
        a dictionary (r, l, i) -> (losses, poes, avg)
    """
    result = {}
    for l, r, data in lr_data:
        if len(data) == 0:  # realization with no losses
            continue
        if insured_losses:
            gloss = data['loss'][:, 0]
            iloss = data['loss'][:, 1]
        else:
            gloss = data['loss']
        losses, poes = scientific.event_based(gloss, ses_ratio,
                                              curve_resolution)
        avg = scientific.average_loss((losses, poes))
        result[l, r, 'losses'] = losses
        result[l, r, 'poes'] = poes
        result[l, r, 'avg'] = avg
        if insured_losses:
            losses_ins, poes_ins = scientific.event_based(
                iloss, ses_ratio, curve_resolution)
            avg_ins = scientific.average_loss((losses_ins, poes_ins))
            result[l, r, 'losses_ins'] = losses_ins
            result[l, r, 'poes_ins'] = poes_ins
            result[l, r, 'avg_ins'] = avg_ins
    return result
Code example #19
    def test_mean_based(self):
        epsilons = scientific.make_epsilons([gmf[0]], seed=1, correlation=0)
        vulnerability_function_rm = (
            scientific.VulnerabilityFunction(
                'RM', 'PGA',
                [0.001, 0.2, 0.3, 0.5, 0.7],
                [0.01, 0.1, 0.2, 0.4, 0.8],
                [0.0, 0.0, 0.0, 0.0, 0.0]))

        vulnerability_function_rc = (
            scientific.VulnerabilityFunction(
                'RC', 'PGA',
                [0.001, 0.2, 0.3, 0.5, 0.7],
                [0.0035, 0.07, 0.14, 0.28, 0.56],
                [0.0, 0.0, 0.0, 0.0, 0.0]))

        cr = 50  # curve resolution
        curve_rm_1 = scientific.event_based(
            vulnerability_function_rm.apply_to(
                [gmf[0]], epsilons)[0], 50, 50, cr)

        curve_rm_2 = scientific.event_based(
            vulnerability_function_rm.apply_to(
                [gmf[1]], epsilons)[0], 50, 50, cr)

        curve_rc = scientific.event_based(
            vulnerability_function_rc.apply_to(
                [gmf[2]], epsilons)[0], 50, 50, cr)

        for i, curve_rm in enumerate([curve_rm_1, curve_rm_2]):

            conditional_loss = scientific.conditional_loss_ratio(
                curve_rm[0], curve_rm[1], 0.8)
            self.assertAlmostEqual([0.0490311, 0.0428061][i], conditional_loss)

            self.assertAlmostEqual(
                [0.070219108, 0.04549904][i],
                scientific.average_loss(curve_rm))

        conditional_loss = scientific.conditional_loss_ratio(
            curve_rc[0], curve_rc[1], 0.8)
        self.assertAlmostEqual(0.0152273, conditional_loss)

        self.assertAlmostEqual(
            0.0152393, scientific.average_loss(curve_rc))
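conditional_loss_ratio, used above, answers "which loss ratio is exceeded with the given probability" on a curve whose poes decrease as losses grow. A self-contained sketch under the assumption of linear interpolation (not the risklib source, which also has to handle out-of-range poes):

import numpy

def conditional_loss_sketch(losses, poes, poe):
    # numpy.interp needs increasing x, so interpolate on the
    # reversed (increasing-poe) view of the curve.
    return numpy.interp(poe, poes[::-1], losses[::-1])

losses = numpy.array([0., 0.1, 0.2, 0.4])
poes = numpy.array([0.9, 0.5, 0.2, 0.05])
print(conditional_loss_sketch(losses, poes, 0.5))  # 0.1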
Code example #20
File: event_based_test.py Project: rcgee/oq-risklib
    def test_mean_based(self):
        epsilons = scientific.make_epsilons([gmf[0]], seed=1, correlation=0)
        vulnerability_function_rm = (
            scientific.VulnerabilityFunction(
                'RM', 'PGA',
                [0.001, 0.2, 0.3, 0.5, 0.7],
                [0.01, 0.1, 0.2, 0.4, 0.8],
                [0.0, 0.0, 0.0, 0.0, 0.0]))

        vulnerability_function_rc = (
            scientific.VulnerabilityFunction(
                'RC', 'PGA',
                [0.001, 0.2, 0.3, 0.5, 0.7],
                [0.0035, 0.07, 0.14, 0.28, 0.56],
                [0.0, 0.0, 0.0, 0.0, 0.0]))

        cr = 50  # curve resolution
        curve_rm_1 = scientific.event_based(
            vulnerability_function_rm.apply_to(
                [gmf[0]], epsilons)[0], 1, cr)

        curve_rm_2 = scientific.event_based(
            vulnerability_function_rm.apply_to(
                [gmf[1]], epsilons)[0], 1, cr)

        curve_rc = scientific.event_based(
            vulnerability_function_rc.apply_to(
                [gmf[2]], epsilons)[0], 1, cr)

        for i, curve_rm in enumerate([curve_rm_1, curve_rm_2]):

            conditional_loss = scientific.conditional_loss_ratio(
                curve_rm[0], curve_rm[1], 0.8)
            self.assertAlmostEqual([0.0490311, 0.0428061][i], conditional_loss)

            self.assertAlmostEqual(
                [0.070219108, 0.04549904][i],
                scientific.average_loss(curve_rm))

        conditional_loss = scientific.conditional_loss_ratio(
            curve_rc[0], curve_rc[1], 0.8)
        self.assertAlmostEqual(0.0152273, conditional_loss)

        self.assertAlmostEqual(
            0.0152393, scientific.average_loss(curve_rc))
Code example #21
    def test_mean_curve_computation(self):
        loss_ratio_curve = Curve([(0, 0.3460), (0.06, 0.12),
                                  (0.12, 0.057), (0.18, 0.04),
                                  (0.24, 0.019), (0.3, 0.009), (0.45, 0)])

        self.assertAlmostEqual(
            0.023305,
            scientific.average_loss(
                loss_ratio_curve.abscissae,
                loss_ratio_curve.ordinates), 3)
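The quantity asserted here is, conceptually, the area under the loss-ratio curve. A plain trapezoidal integration of the same points gives the right order of magnitude but not the exact value, since the engine applies its own discretization; shown only for intuition:

import numpy

losses = numpy.array([0, 0.06, 0.12, 0.18, 0.24, 0.3, 0.45])
poes = numpy.array([0.3460, 0.12, 0.057, 0.04, 0.019, 0.009, 0])
# Area under poe(loss) by the trapezoidal rule:
print(numpy.trapz(poes, losses))  # ~0.0255 vs the asserted 0.023305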
Code example #22
    def classical_bcr(self, loss_type, assets, hazard, eids=None, eps=None):
        """
        :param loss_type: the loss type
        :param assets: a list of N assets of the same taxonomy
        :param hazard: a hazard curve
        :param eids: dummy parameter, unused
        :param eps: dummy parameter, unused
        :returns: a list of triples (eal_orig, eal_retro, bcr_result)
        """
        if loss_type != 'structural':
            raise NotImplementedError('retrofitted is not defined for ' +
                                      loss_type)
        n = len(assets)
        self.assets = assets
        vf = self.risk_functions[loss_type, 'vulnerability']
        imls = self.hazard_imtls[vf.imt]
        vf_retro = self.risk_functions[loss_type, 'vulnerability_retrofitted']
        curves_orig = functools.partial(
            scientific.classical,
            vf,
            imls,
            loss_ratios=self.loss_ratios_orig[loss_type])
        curves_retro = functools.partial(
            scientific.classical,
            vf_retro,
            imls,
            loss_ratios=self.loss_ratios_retro[loss_type])
        original_loss_curves = numpy.array([curves_orig(hazard)] * n)
        retrofitted_loss_curves = numpy.array([curves_retro(hazard)] * n)

        eal_original = numpy.array(
            [scientific.average_loss(lc) for lc in original_loss_curves])

        eal_retrofitted = numpy.array(
            [scientific.average_loss(lc) for lc in retrofitted_loss_curves])

        bcr_results = [
            scientific.bcr(eal_original[i], eal_retrofitted[i],
                           self.interest_rate, self.asset_life_expectancy,
                           asset['value-' + loss_type], asset['retrofitted'])
            for i, asset in enumerate(assets)
        ]
        return list(zip(eal_original, eal_retrofitted, bcr_results))
Code example #23
File: core.py Project: kenxshao/oq-engine
def do_event_based_bcr(loss_type, units, containers, params, profile):
    """
    See `event_based_bcr` for docstring
    """
    for unit_orig, unit_retro in utils.pairwise(units):

        with profile("getting hazard"):
            assets, (gmvs, _) = unit_orig.getter()
            if len(assets) == 0:
                logs.LOG.info("Exit from task as no asset could be processed")
                return

            _, (gmvs_retro, _) = unit_retro.getter()

        with profile("computing bcr"):
            _, original_loss_curves = unit_orig.calc(gmvs)
            _, retrofitted_loss_curves = unit_retro.calc(gmvs_retro)

            eal_original = [
                scientific.average_loss(losses, poes)
                for losses, poes in original_loss_curves]

            eal_retrofitted = [
                scientific.average_loss(losses, poes)
                for losses, poes in retrofitted_loss_curves]

            bcr_results = [
                scientific.bcr(
                    eal_original[i],
                    eal_retrofitted[i],
                    params.interest_rate,
                    params.asset_life_expectancy,
                    asset.value(loss_type),
                    asset.retrofitted(loss_type),
                )
                for i, asset in enumerate(assets)
            ]

        with profile("writing results"):
            containers.write(
                assets,
                zip(eal_original, eal_retrofitted, bcr_results),
                output_type="bcr_distribution",
                loss_type=loss_type,
                hazard_output_id=unit_orig.getter.hazard_output.id,
            )
Code example #24
File: classical_risk.py Project: acortesz/oq-engine
def classical_risk(riskinputs, riskmodel, param, monitor):
    """
    Compute and return the average losses for each asset.

    :param riskinputs:
        :class:`openquake.risklib.riskinput.RiskInput` objects
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
    :param param:
        dictionary of extra parameters
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    """
    result = dict(loss_curves=[], stat_curves=[])
    for ri in riskinputs:
        all_outputs = list(riskmodel.gen_outputs(ri, monitor))
        for outputs in all_outputs:
            r = outputs.rlzi
            outputs.average_losses = AccumDict(accum=[])  # l -> array
            for l, loss_curves in enumerate(outputs):
                # loss_curves has shape (C, N, 2)
                for i, asset in enumerate(outputs.assets):
                    aid = asset.ordinal
                    avg = scientific.average_loss(loss_curves[:, i].T)
                    outputs.average_losses[l].append(avg)
                    lcurve = (loss_curves[:, i, 0], loss_curves[:, i, 1], avg)
                    result['loss_curves'].append((l, r, aid, lcurve))

        # compute statistics
        R = ri.hazard_getter.num_rlzs
        w = param['weights']
        statnames, stats = zip(*param['stats'])
        l_idxs = range(len(riskmodel.lti))
        for assets, outs in groupby(all_outputs,
                                    lambda o: tuple(o.assets)).items():
            weights = [w[out.rlzi] for out in outs]
            out = outs[0]
            for l in l_idxs:
                for i, asset in enumerate(assets):
                    avgs = numpy.array([r.average_losses[l][i] for r in outs])
                    avg_stats = compute_stats(avgs, stats, weights)
                    # out[l] is a pair (loss_curves, insured_loss_curves)
                    # out[l][:, i, 0] are the i-th losses
                    # out[l][:, i, 1] are the i-th poes
                    losses = out[l][:, i, 0]
                    poes_stats = compute_stats(
                        numpy.array([out[l][:, i, 1] for out in outs]), stats,
                        weights)
                    result['stat_curves'].append(
                        (l, asset.ordinal, losses, poes_stats, avg_stats))
    if R == 1:  # the realization is the same as the mean
        del result['loss_curves']
    return result
Code example #25
    def test_mean_based(self):
        vulnerability_function_rm = (
            scientific.VulnerabilityFunction(
                [0.001, 0.2, 0.3, 0.5, 0.7], [0.01, 0.1, 0.2, 0.4, 0.8],
                [0.0, 0.0, 0.0, 0.0, 0.0], "LN"))

        vulnerability_function_rc = (
            scientific.VulnerabilityFunction(
                [0.001, 0.2, 0.3, 0.5, 0.7], [0.0035, 0.07, 0.14, 0.28, 0.56],
                [0.0, 0.0, 0.0, 0.0, 0.0], "LN"))

        curve_rm_1 = scientific.event_based(
            scientific.vulnerability_function_applier(
                vulnerability_function_rm, [gmf[0]])[0], 50, 50)

        curve_rm_2 = scientific.event_based(
            scientific.vulnerability_function_applier(
                vulnerability_function_rm, [gmf[1]])[0], 50, 50)

        curve_rc = scientific.event_based(
            scientific.vulnerability_function_applier(
                vulnerability_function_rc, [gmf[2]])[0], 50, 50)

        for i, curve_rm in enumerate([curve_rm_1, curve_rm_2]):

            conditional_loss = scientific.conditional_loss_ratio(
                curve_rm[0], curve_rm[1], 0.8)
            self.assertAlmostEqual([0.0490311, 0.0428061][i], conditional_loss)

            self.assertAlmostEqual(
                [0.070219108, 0.04549904][i],
                scientific.average_loss(curve_rm[0], curve_rm[1]))

        conditional_loss = scientific.conditional_loss_ratio(
            curve_rc[0], curve_rc[1], 0.8)
        self.assertAlmostEqual(0.0152273, conditional_loss)

        self.assertAlmostEqual(
            0.0152393,
            scientific.average_loss(curve_rc[0], curve_rc[1]))
Code example #26
def classical_risk(riskinputs, param, monitor):
    """
    Compute and return the average losses for each asset.

    :param riskinputs:
        :class:`openquake.risklib.riskinput.RiskInput` objects
    :param param:
        dictionary of extra parameters
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    """
    crmodel = monitor.read('crmodel')
    result = dict(loss_curves=[], stat_curves=[])
    weights = [w['default'] for w in param['weights']]
    statnames, stats = zip(*param['stats'])
    mon = monitor('getting hazard', measuremem=False)
    for ri in riskinputs:
        A = len(ri.asset_df)
        L = len(crmodel.lti)
        R = ri.hazard_getter.num_rlzs
        loss_curves = numpy.zeros((R, L, A), object)
        avg_losses = numpy.zeros((R, L, A))
        with mon:
            haz = ri.hazard_getter.get_hazard()
        for taxo, asset_df in ri.asset_df.groupby('taxonomy'):
            for rlz in range(R):
                pcurve = haz.extract(rlz)
                out = crmodel.get_output(taxo, asset_df, pcurve, rlz=rlz)
                for li, loss_type in enumerate(crmodel.loss_types):
                    # loss_curves has shape (A, C)
                    for i, asset in enumerate(asset_df.to_records()):
                        loss_curves[rlz, li, i] = lc = out[loss_type][i]
                        aid = asset['ordinal']
                        avg = scientific.average_loss(lc)
                        avg_losses[rlz, li, i] = avg
                        lcurve = (lc['loss'], lc['poe'], avg)
                        result['loss_curves'].append((li, rlz, aid, lcurve))

        # compute statistics
        for li, loss_type in enumerate(crmodel.loss_types):
            for i, asset in enumerate(ri.asset_df.to_records()):
                avg_stats = compute_stats(
                    avg_losses[:, li, i], stats, weights)
                losses = loss_curves[0, li, i]['loss']
                all_poes = numpy.array(
                    [loss_curves[r, li, i]['poe'] for r in range(R)])
                poes_stats = compute_stats(all_poes, stats, weights)
                result['stat_curves'].append(
                    (li, asset['ordinal'], losses, poes_stats, avg_stats))
    if R == 1:  # the realization is the same as the mean
        del result['loss_curves']
    return result
Code example #27
File: workflows.py Project: julgp/oq-risklib
    def __call__(self, loss_type, assets, hazard):
        self.assets = assets

        original_loss_curves = self.curves_orig[loss_type](hazard)
        retrofitted_loss_curves = self.curves_retro[loss_type](hazard)

        eal_original = [
            scientific.average_loss(losses, poes)
            for losses, poes in original_loss_curves]

        eal_retrofitted = [
            scientific.average_loss(losses, poes)
            for losses, poes in retrofitted_loss_curves]

        bcr_results = [
            scientific.bcr(
                eal_original[i], eal_retrofitted[i],
                self.interest_rate, self.asset_life_expectancy,
                asset.value(loss_type), asset.retrofitted(loss_type))
            for i, asset in enumerate(assets)]

        return zip(eal_original, eal_retrofitted, bcr_results)
Code example #28
File: riskmodels.py Project: digitalsatori/oq-engine
    def classical_bcr(self, loss_type, assets, hazard, eids=None, eps=None):
        """
        :param loss_type: the loss type
        :param assets: a list of N assets of the same taxonomy
        :param hazard: a hazard curve
        :param eids: dummy parameter, unused
        :param eps: dummy parameter, unused
        :returns: a list of triples (eal_orig, eal_retro, bcr_result)
        """
        if loss_type != 'structural':
            raise NotImplementedError(
                'retrofitted is not defined for ' + loss_type)
        n = len(assets)
        self.assets = assets
        vf = self.risk_functions[loss_type, 'vulnerability']
        imls = self.hazard_imtls[vf.imt]
        vf_retro = self.risk_functions[loss_type, 'vulnerability_retrofitted']
        curves_orig = functools.partial(
            scientific.classical, vf, imls,
            loss_ratios=self.loss_ratios_orig[loss_type])
        curves_retro = functools.partial(
            scientific.classical, vf_retro, imls,
            loss_ratios=self.loss_ratios_retro[loss_type])
        original_loss_curves = numpy.array([curves_orig(hazard)] * n)
        retrofitted_loss_curves = numpy.array([curves_retro(hazard)] * n)

        eal_original = numpy.array([scientific.average_loss(lc)
                                    for lc in original_loss_curves])

        eal_retrofitted = numpy.array([scientific.average_loss(lc)
                                       for lc in retrofitted_loss_curves])

        bcr_results = [
            scientific.bcr(
                eal_original[i], eal_retrofitted[i],
                self.interest_rate, self.asset_life_expectancy,
                asset['value-' + loss_type], asset['retrofitted'])
            for i, asset in enumerate(assets)]
        return list(zip(eal_original, eal_retrofitted, bcr_results))
Code example #29
File: workflows.py Project: julgp/oq-risklib
    def __call__(self, loss_type, assets, gmfs, epsilons, event_ids):
        self.assets = assets
        original_loss_curves = self.curves(
            self.vf_orig[loss_type].apply_to(gmfs, epsilons))
        retrofitted_loss_curves = self.curves(
            self.vf_retro[loss_type].apply_to(gmfs, epsilons))

        eal_original = [
            scientific.average_loss(losses, poes)
            for losses, poes in original_loss_curves]

        eal_retrofitted = [
            scientific.average_loss(losses, poes)
            for losses, poes in retrofitted_loss_curves]

        bcr_results = [
            scientific.bcr(
                eal_original[i], eal_retrofitted[i],
                self.interest_rate, self.asset_life_expectancy,
                asset.value(loss_type), asset.retrofitted(loss_type))
            for i, asset in enumerate(assets)]

        return zip(eal_original, eal_retrofitted, bcr_results)
Code example #30
File: workflows.py Project: julgp/oq-risklib
    def __call__(self, loss_type, assets, hazard_curves, _epsilons=None):
        """
        :param str loss_type:
            the loss type considered
        :param assets:
            assets is an iterator over N
            :class:`openquake.risklib.workflows.Asset` instances
        :param hazard_curves:
            hazard_curves is an iterator over hazard curves
            (numpy arrays shaped 2 x R).
        :param _epsilons:
            ignored, here only for API compatibility with other calculators
        :returns:
            a :class:`openquake.risklib.workflows.Classical.Output` instance.
        """
        curves = self.curves[loss_type](hazard_curves)
        average_losses = numpy.array([scientific.average_loss(losses, poes)
                                      for losses, poes in curves])
        maps = self.maps(curves)
        fractions = self.fractions(curves)

        if self.insured_losses and loss_type != 'fatalities':
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]

            insured_curves = utils.numpy_map(
                scientific.insured_loss_curve, curves, deductibles, limits)
            average_insured_losses = [
                scientific.average_loss(losses, poes)
                for losses, poes in insured_curves]
        else:
            insured_curves = None
            average_insured_losses = None

        return self.Output(
            assets,
            curves, average_losses, insured_curves, average_insured_losses,
            maps, fractions)
Code example #31
    def test_mean_based_with_partial_correlation(self):
        # This is a regression test. Data has not been checked
        vf = (
            scientific.VulnerabilityFunction(
                [0.001, 0.2, 0.3, 0.5, 0.7], [0.01, 0.1, 0.2, 0.4, 0.8],
                [0.01, 0.02, 0.02, 0.01, 0.03], "LN"))
        gmvs = numpy.array([[10., 20., 30., 40., 50.],
                           [1., 2., 3., 4., 5.]])
        loss_matrix = scientific.vulnerability_function_applier(
            vf, gmvs, seed=1, asset_correlation=0.5)

        losses, poes = scientific.event_based(loss_matrix[0], 120, 30, 4)
        first_curve_integral = scientific.average_loss(losses, poes)

        self.assertAlmostEqual(0.48983614471, first_curve_integral)
Code example #32
def classical_risk(riskinputs, riskmodel, param, monitor):
    """
    Compute and return the average losses for each asset.

    :param riskinputs:
        :class:`openquake.risklib.riskinput.RiskInput` objects
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
    :param param:
        dictionary of extra parameters
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    """
    result = dict(loss_curves=[], stat_curves=[])
    weights = [w['default'] for w in param['weights']]
    statnames, stats = zip(*param['stats'])
    for ri in riskinputs:
        A = len(ri.assets)
        L = len(riskmodel.lti)
        R = ri.hazard_getter.num_rlzs
        loss_curves = numpy.zeros((R, L, A), object)
        avg_losses = numpy.zeros((R, L, A))
        for out in riskmodel.gen_outputs(ri, monitor):
            r = out.rlzi
            for l, loss_type in enumerate(riskmodel.loss_types):
                # loss_curves has shape (A, C)
                for i, asset in enumerate(ri.assets):
                    loss_curves[out.rlzi, l, i] = lc = out[loss_type][i]
                    aid = asset['ordinal']
                    avg = scientific.average_loss(lc)
                    avg_losses[r, l, i] = avg
                    lcurve = (lc['loss'], lc['poe'], avg)
                    result['loss_curves'].append((l, r, aid, lcurve))

        # compute statistics
        for l, loss_type in enumerate(riskmodel.loss_types):
            for i, asset in enumerate(ri.assets):
                avg_stats = compute_stats(avg_losses[:, l, i], stats, weights)
                losses = loss_curves[0, l, i]['loss']
                all_poes = numpy.array(
                    [loss_curves[r, l, i]['poe'] for r in range(R)])
                poes_stats = compute_stats(all_poes, stats, weights)
                result['stat_curves'].append(
                    (l, asset['ordinal'], losses, poes_stats, avg_stats))
    if R == 1:  # the realization is the same as the mean
        del result['loss_curves']
    return result
Code example #33
def classical_risk(riskinputs, crmodel, param, monitor):
    """
    Compute and return the average losses for each asset.

    :param riskinputs:
        :class:`openquake.risklib.riskinput.RiskInput` objects
    :param crmodel:
        a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
    :param param:
        dictionary of extra parameters
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    """
    result = dict(loss_curves=[], stat_curves=[])
    weights = [w['default'] for w in param['weights']]
    statnames, stats = zip(*param['stats'])
    for ri in riskinputs:
        A = len(ri.assets)
        L = len(crmodel.lti)
        R = ri.hazard_getter.num_rlzs
        loss_curves = numpy.zeros((R, L, A), object)
        avg_losses = numpy.zeros((R, L, A))
        for out in ri.gen_outputs(crmodel, monitor):
            r = out.rlzi
            for l, loss_type in enumerate(crmodel.loss_types):
                # loss_curves has shape (A, C)
                for i, asset in enumerate(ri.assets):
                    loss_curves[out.rlzi, l, i] = lc = out[loss_type][i]
                    aid = asset['ordinal']
                    avg = scientific.average_loss(lc)
                    avg_losses[r, l, i] = avg
                    lcurve = (lc['loss'], lc['poe'], avg)
                    result['loss_curves'].append((l, r, aid, lcurve))

        # compute statistics
        for l, loss_type in enumerate(crmodel.loss_types):
            for i, asset in enumerate(ri.assets):
                avg_stats = compute_stats(avg_losses[:, l, i], stats, weights)
                losses = loss_curves[0, l, i]['loss']
                all_poes = numpy.array(
                    [loss_curves[r, l, i]['poe'] for r in range(R)])
                poes_stats = compute_stats(all_poes, stats, weights)
                result['stat_curves'].append(
                    (l, asset['ordinal'], losses, poes_stats, avg_stats))
    if R == 1:  # the realization is the same as the mean
        del result['loss_curves']
    return result
Code example #34
    def build_agg_loss_curve_and_map(self, losses):
        """
        Build a loss curve from a set of losses with length given by
        the parameter loss_curve_resolution.

        :param losses: a sequence of losses
        :returns: a quartet (losses, poes, avg, loss_map)
        """
        oq = self.oqparam
        clp = oq.conditional_loss_poes
        losses_poes = scientific.event_based(
            losses, tses=oq.tses, time_span=oq.risk_investigation_time or
            oq.investigation_time, curve_resolution=oq.loss_curve_resolution)
        loss_map = scientific.loss_map_matrix(
            clp, [losses_poes]).reshape(len(clp)) if clp else None
        return (losses_poes[0], losses_poes[1],
                scientific.average_loss(losses_poes), loss_map)
Code example #35
    def test_insured_loss_mean_based(self):
        vf = scientific.VulnerabilityFunction(
            'VF', 'PGA',
            [0.001, 0.2, 0.3, 0.5, 0.7],
            [0.01, 0.1, 0.2, 0.4, 0.8],
            [0.0, 0.0, 0.0, 0.0, 0.0])

        epsilons = scientific.make_epsilons(gmf[0:2], seed=1, correlation=0)
        loss_ratios = vf.apply_to(gmf[0:2], epsilons)

        values = [3000, 1000]
        insured_limits = [1250., 40.]
        deductibles = [40, 13]

        insured_average_losses = [
            scientific.average_loss(scientific.event_based(
                scientific.insured_losses(
                    lrs,
                    deductibles[i] / values[i], insured_limits[i] / values[i]),
                50, 50, 20))
            for i, lrs in enumerate(loss_ratios)]

        numpy.testing.assert_allclose(
            [207.86489132 / 3000,   38.07815797 / 1000],
            insured_average_losses)

        wf = workflows.ProbabilisticEventBased(
            'PGA', 'SOME-TAXONOMY',
            vulnerability_functions={self.loss_type: vf},
            risk_investigation_time=50,
            hazard_investigation_time=50,
            ses_per_logic_tree_path=200,
            number_of_logic_tree_samples=0,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=True
            )
        out = wf(self.loss_type, assets, gmf[0:2], epsilons, [1, 2, 3, 4, 5])
        self.assert_similar(
            out.event_loss_table,
            {1: 0.20314761658291458,
             2: 0,
             3: 0,
             4: 0,
             5: 0,
             })
Code example #36
    def test_insured_loss_mean_based(self):
        vf = scientific.VulnerabilityFunction('VF', 'PGA',
                                              [0.001, 0.2, 0.3, 0.5, 0.7],
                                              [0.01, 0.1, 0.2, 0.4, 0.8],
                                              [0.0, 0.0, 0.0, 0.0, 0.0])

        epsilons = scientific.make_epsilons(gmf[0:2], seed=1, correlation=0)
        loss_ratios = vf.apply_to(gmf[0:2], epsilons)

        values = [3000., 1000.]
        insured_limits = [1250., 40.]
        deductibles = [40., 13.]

        insured_average_losses = [
            scientific.average_loss(
                scientific.event_based(
                    scientific.insured_losses(lrs, deductibles[i] / values[i],
                                              insured_limits[i] / values[i]),
                    50, 50, 20)) for i, lrs in enumerate(loss_ratios)
        ]
        numpy.testing.assert_allclose([0.05667045, 0.02542965],
                                      insured_average_losses)

        wf = workflows.ProbabilisticEventBased(
            'PGA',
            'SOME-TAXONOMY',
            vulnerability_functions={self.loss_type: vf},
            investigation_time=50,
            risk_investigation_time=50,
            ses_per_logic_tree_path=200,
            number_of_logic_tree_samples=0,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=True)
        out = wf(self.loss_type, assets, gmf[0:2], epsilons, [1, 2, 3, 4, 5])
        self.assert_similar(out.event_loss_table, {
            1: 0.20314761658291458,
            2: 0,
            3: 0,
            4: 0,
            5: 0,
        })
Code example #37
    def test_insured_loss_mean_based(self):
        vf = scientific.VulnerabilityFunction(
            'VF', 'PGA',
            [0.001, 0.2, 0.3, 0.5, 0.7],
            [0.01, 0.1, 0.2, 0.4, 0.8],
            [0.0, 0.0, 0.0, 0.0, 0.0])

        epsilons = scientific.make_epsilons(gmf[0:2], seed=1, correlation=0)
        loss_ratios = vf.apply_to(gmf[0:2], epsilons)

        values = [3000., 1000.]
        insured_limits = [1250., 40.]
        deductibles = [40., 13.]

        insured_average_losses = [
            scientific.average_loss(scientific.event_based(
                scientific.insured_losses(
                    lrs,
                    deductibles[i] / values[i], insured_limits[i] / values[i]),
                1, 20))
            for i, lrs in enumerate(loss_ratios)]
        numpy.testing.assert_allclose([0.05667045, 0.02542965],
                                      insured_average_losses)

        wf = workflows.ProbabilisticEventBased(
            'PGA', 'SOME-TAXONOMY',
            vulnerability_functions={self.loss_type: vf},
            investigation_time=50,
            risk_investigation_time=50,
            ses_per_logic_tree_path=200,
            number_of_logic_tree_samples=0,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=True
            )
        wf.riskmodel = mock.MagicMock()
        out = wf(self.loss_type, assets, gmf[0:2], epsilons, [1, 2, 3, 4, 5])
        numpy.testing.assert_almost_equal(
            out.average_losses, [0.00473820568, 0.0047437959417])
        numpy.testing.assert_almost_equal(
            out.average_insured_losses, [0, 0])
Code example #38
    def test_mean_based_with_perfect_correlation(self):
        # This is a regression test. Data has not been checked
        vf = (
            scientific.VulnerabilityFunction(
                'SOME-TAXONOMY', 'PGA',
                [0.001, 0.2, 0.3, 0.5, 0.7],
                [0.01, 0.1, 0.2, 0.4, 0.8],
                [0.01, 0.02, 0.02, 0.01, 0.03]))

        gmvs = [[10., 20., 30., 40., 50.],
                [1., 2., 3., 4., 5.]]

        epsilons = scientific.make_epsilons(gmvs, seed=1, correlation=1)
        loss_matrix = vf.apply_to(gmvs, epsilons)
        losses_poes = scientific.event_based(loss_matrix[0], 120, 30, 4)

        first_curve_integral = scientific.average_loss(losses_poes)

        self.assertAlmostEqual(0.483041416, first_curve_integral)

        wf = workflows.ProbabilisticEventBased(
            'PGA', 'SOME-TAXONOMY',
            vulnerability_functions={self.loss_type: vf},
            risk_investigation_time=50,
            hazard_investigation_time=50,
            ses_per_logic_tree_path=200,
            number_of_logic_tree_samples=0,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=False
            )
        out = wf(self.loss_type, assets, gmvs, epsilons, [1, 2, 3, 4, 5])
        self.assert_similar(
            out.event_loss_table,
            {1: 15.232320555463319,
             2: 16.248173683693864,
             3: 15.583030510462981,
             4: 15.177382760499968,
             5: 15.840499250058254,
             })
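
The only numerical difference from the uncorrelated variant below is `correlation=1`. A hedged side-by-side sketch (invented ground motion values), assuming `make_epsilons` keeps the keyword signature used here:

import numpy
from openquake.risklib import scientific

gmvs = numpy.array([[10., 20., 30.], [1., 2., 3.]])  # invented GMFs
eps_uncorr = scientific.make_epsilons(gmvs, seed=1, correlation=0)
eps_corr = scientific.make_epsilons(gmvs, seed=1, correlation=1)
# with correlation=0 each asset row gets independent draws; with
# correlation=1 the rows should come out fully correlated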
Code example #39
    def test_mean_based_with_no_correlation(self):
        # This is a regression test. Data has not been checked
        vf = (
            scientific.VulnerabilityFunction(
                'SOME-TAXONOMY', 'PGA',
                [0.001, 0.2, 0.3, 0.5, 0.7],
                [0.01, 0.1, 0.2, 0.4, 0.8],
                [0.01, 0.02, 0.02, 0.01, 0.03]))
        gmvs = numpy.array([[10., 20., 30., 40., 50.],
                            [1., 2., 3., 4., 5.]])

        epsilons = scientific.make_epsilons(gmvs, seed=1, correlation=0)
        loss_matrix = vf.apply_to(gmvs, epsilons)
        losses_poes = scientific.event_based(
            loss_matrix[0], 120, 30, curve_resolution=4)

        first_curve_integral = scientific.average_loss(losses_poes)

        self.assertAlmostEqual(0.500993631, first_curve_integral)

        wf = workflows.ProbabilisticEventBased(
            'PGA', 'SOME-TAXONOMY',
            vulnerability_functions={self.loss_type: vf},
            risk_investigation_time=50,
            hazard_investigation_time=50,
            ses_per_logic_tree_path=200,
            number_of_logic_tree_samples=0,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=False
            )
        out = wf(self.loss_type, assets, gmvs, epsilons, [1, 2, 3, 4, 5])
        self.assert_similar(
            out.event_loss_table,
            {1: 16.246646231503398,
             2: 15.613885199116158,
             3: 15.669704465134854,
             4: 16.241922530992454,
             5: 16.010104452203464,
             })
Code example #40
    def test_mean_based_with_no_correlation(self):
        # This is a regression test. Data has not been checked
        vf = (scientific.VulnerabilityFunction('SOME-TAXONOMY', 'PGA',
                                               [0.001, 0.2, 0.3, 0.5, 0.7],
                                               [0.01, 0.1, 0.2, 0.4, 0.8],
                                               [0.01, 0.02, 0.02, 0.01, 0.03]))
        gmvs = numpy.array([[10., 20., 30., 40., 50.], [1., 2., 3., 4., 5.]])

        epsilons = scientific.make_epsilons(gmvs, seed=1, correlation=0)
        loss_matrix = vf.apply_to(gmvs, epsilons)
        losses_poes = scientific.event_based(loss_matrix[0],
                                             120,
                                             30,
                                             curve_resolution=4)

        first_curve_integral = scientific.average_loss(losses_poes)

        self.assertAlmostEqual(0.500993631, first_curve_integral)

        wf = workflows.ProbabilisticEventBased(
            'PGA',
            'SOME-TAXONOMY',
            vulnerability_functions={self.loss_type: vf},
            investigation_time=50,
            risk_investigation_time=50,
            ses_per_logic_tree_path=200,
            number_of_logic_tree_samples=0,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=False)
        out = wf(self.loss_type, assets, gmvs, epsilons, [1, 2, 3, 4, 5])
        self.assert_similar(
            out.event_loss_table, {
                1: 16.246646231503398,
                2: 15.613885199116158,
                3: 15.669704465134854,
                4: 16.241922530992454,
                5: 16.010104452203464,
            })
Code example #41
    def test_mean_based_with_perfect_correlation(self):
        # This is a regression test. Data has not been checked
        vf = (scientific.VulnerabilityFunction('SOME-TAXONOMY', 'PGA',
                                               [0.001, 0.2, 0.3, 0.5, 0.7],
                                               [0.01, 0.1, 0.2, 0.4, 0.8],
                                               [0.01, 0.02, 0.02, 0.01, 0.03]))

        gmvs = [[10., 20., 30., 40., 50.], [1., 2., 3., 4., 5.]]

        epsilons = scientific.make_epsilons(gmvs, seed=1, correlation=1)
        loss_matrix = vf.apply_to(gmvs, epsilons)
        losses_poes = scientific.event_based(loss_matrix[0], 120, 30, 4)

        first_curve_integral = scientific.average_loss(losses_poes)

        self.assertAlmostEqual(0.483041416, first_curve_integral)

        wf = workflows.ProbabilisticEventBased(
            'PGA',
            'SOME-TAXONOMY',
            vulnerability_functions={self.loss_type: vf},
            investigation_time=50,
            risk_investigation_time=50,
            ses_per_logic_tree_path=200,
            number_of_logic_tree_samples=0,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=False)
        out = wf(self.loss_type, assets, gmvs, epsilons, [1, 2, 3, 4, 5])
        self.assert_similar(
            out.event_loss_table, {
                1: 15.232320555463319,
                2: 16.248173683693864,
                3: 15.583030510462981,
                4: 15.177382760499968,
                5: 15.840499250058254,
            })
Code example #42
File: risk.py Project: amirj700/oq-risklib
def export_rcurves_rlzs(ekey, dstore):
    assetcol = dstore['assetcol/array'].value
    aref = dstore['asset_refs'].value
    rcurves = dstore[ekey[0]]
    [loss_ratios] = dstore['loss_ratios']
    fnames = []
    writercls = (risk_writers.LossCurveGeoJSONWriter
                 if ekey[0] == 'geojson' else risk_writers.LossCurveXMLWriter)
    for writer, (ltype, poe, r, ins) in _gen_writers(dstore, writercls,
                                                     ekey[0]):
        if ltype not in loss_ratios.dtype.names:
            continue  # ignore loss type
        array = rcurves[ltype][:, r, ins]
        curves = []
        for ass, poes in zip(assetcol, array):
            loc = Location(ass['lon'], ass['lat'])
            losses = loss_ratios[ltype] * ass[ltype]
            avg = scientific.average_loss((losses, poes))
            curve = LossCurve(loc, aref[ass['idx']], poes, losses,
                              loss_ratios[ltype], avg, None)
            curves.append(curve)
        writer.serialize(curves)
        fnames.append(writer._dest)
    return sorted(fnames)
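
Note the rescaling inside the loop: `rcurves` holds loss-ratio curves, so each asset's absolute losses are `loss_ratios[ltype] * ass[ltype]` before the average is taken. A hedged one-asset sketch with invented numbers:

import numpy
from openquake.risklib import scientific

loss_ratios = numpy.array([0.0, 0.1, 0.2, 0.4])  # invented ratio grid
poes = numpy.array([0.9, 0.5, 0.1, 0.01])        # invented curve poes
asset_value = 3000.
avg = scientific.average_loss((loss_ratios * asset_value, poes))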
Code example #43
File: core.py Project: larsbutler/oq-engine
    def post_process(self):
        """
          Compute aggregate loss curves and event loss tables
        """
        with EnginePerformanceMonitor('post processing', self.job.id):

            time_span, tses = self.hazard_times()
            for loss_type, event_loss_table in self.event_loss_tables.items():
                for hazard_output in self.rc.hazard_outputs():

                    event_loss = models.EventLoss.objects.create(
                        output=models.Output.objects.create_output(
                            self.job,
                            "Event Loss Table. type=%s, hazard=%s" % (
                                loss_type, hazard_output.id),
                            "event_loss"),
                        loss_type=loss_type,
                        hazard_output=hazard_output)
                    inserter = writer.CacheInserter(models.EventLossData, 9999)

                    ruptures = models.SESRupture.objects.filter(
                        ses__ses_collection__lt_realization=
                        hazard_output.output_container.lt_realization)

                    for rupture in ruptures:
                        if rupture.id in event_loss_table:
                            inserter.add(
                                models.EventLossData(
                                    event_loss_id=event_loss.id,
                                    rupture_id=rupture.id,
                                    aggregate_loss=event_loss_table[
                                        rupture.id]))
                    inserter.flush()

                    aggregate_losses = [
                        event_loss_table[rupture.id]
                        for rupture in ruptures
                        if rupture.id in event_loss_table]

                    if aggregate_losses:
                        aggregate_loss_losses, aggregate_loss_poes = (
                            scientific.event_based(
                                aggregate_losses, tses=tses,
                                time_span=time_span,
                                curve_resolution=self.rc.loss_curve_resolution
                            ))

                        models.AggregateLossCurveData.objects.create(
                            loss_curve=models.LossCurve.objects.create(
                                aggregate=True, insured=False,
                                hazard_output=hazard_output,
                                loss_type=loss_type,
                                output=models.Output.objects.create_output(
                                    self.job,
                                    "aggregate loss curves. "
                                    "loss_type=%s hazard=%s" % (
                                        loss_type, hazard_output),
                                    "agg_loss_curve")),
                            losses=aggregate_loss_losses,
                            poes=aggregate_loss_poes,
                            average_loss=scientific.average_loss(
                                aggregate_loss_losses, aggregate_loss_poes),
                            stddev_loss=numpy.std(aggregate_losses))
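
Stripped of the ORM bookkeeping, the numerical core of this post-processing step is small: collect the per-rupture aggregate losses, build a curve with `event_based`, then take `average_loss` and the standard deviation. A hedged sketch with an invented event loss table:

import numpy
from openquake.risklib import scientific

event_loss_table = {1: 15.2, 2: 16.2, 3: 15.6}  # invented rupture id -> loss
aggregate_losses = list(event_loss_table.values())
agg_losses, agg_poes = scientific.event_based(
    aggregate_losses, tses=10000, time_span=50, curve_resolution=20)
average_loss = scientific.average_loss(agg_losses, agg_poes)
stddev_loss = numpy.std(aggregate_losses)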
Code example #44
File: core.py Project: julgp/oq-engine
    def post_process(self):
        """
          Compute aggregate loss curves and event loss tables
        """
        with self.monitor('post processing'):
            inserter = writer.CacheInserter(models.EventLossData,
                                            max_cache_size=10000)
            time_span, tses = self.hazard_times()
            for (loss_type, out_id), event_loss_table in self.acc.items():
                if out_id:  # values for individual realizations
                    hazard_output = models.Output.objects.get(pk=out_id)
                    event_loss = models.EventLoss.objects.get(
                        output__oq_job=self.job,
                        output__output_type='event_loss',
                        loss_type=loss_type, hazard_output=hazard_output)
                    if isinstance(hazard_output.output_container,
                                  models.SESCollection):
                        ses_coll = hazard_output.output_container
                        rupture_ids = ses_coll.get_ruptures().values_list(
                            'id', flat=True)
                    else:  # extract the SES collection from the Gmf
                        rupture_ids = models.SESRupture.objects.filter(
                            rupture__ses_collection__trt_model__lt_model=
                            hazard_output.output_container.
                            lt_realization.lt_model).values_list(
                            'id', flat=True)
                    for rupture_id in rupture_ids:
                        if rupture_id in event_loss_table:
                            inserter.add(
                                models.EventLossData(
                                    event_loss_id=event_loss.id,
                                    rupture_id=rupture_id,
                                    aggregate_loss=event_loss_table[
                                        rupture_id]))
                    inserter.flush()

                    aggregate_losses = [
                        event_loss_table[rupture_id]
                        for rupture_id in rupture_ids
                        if rupture_id in event_loss_table]

                    if aggregate_losses:
                        aggregate_loss_losses, aggregate_loss_poes = (
                            scientific.event_based(
                                aggregate_losses, tses=tses,
                                time_span=time_span,
                                curve_resolution=self.rc.loss_curve_resolution
                            ))

                        models.AggregateLossCurveData.objects.create(
                            loss_curve=models.LossCurve.objects.create(
                                aggregate=True, insured=False,
                                hazard_output=hazard_output,
                                loss_type=loss_type,
                                output=models.Output.objects.create_output(
                                    self.job,
                                    "aggregate loss curves. "
                                    "loss_type=%s hazard=%s" % (
                                        loss_type, hazard_output),
                                    "agg_loss_curve")),
                            losses=aggregate_loss_losses,
                            poes=aggregate_loss_poes,
                            average_loss=scientific.average_loss(
                                aggregate_loss_losses, aggregate_loss_poes),
                            stddev_loss=numpy.std(aggregate_losses))
Code example #45
    def post_process(self):
        """
          Compute aggregate loss curves and event loss tables
        """
        with EnginePerformanceMonitor('post processing', self.job.id):

            time_span, tses = self.hazard_times()
            for loss_type, event_loss_table in self.event_loss_tables.items():
                for hazard_output in self.rc.hazard_outputs():

                    event_loss = models.EventLoss.objects.create(
                        output=models.Output.objects.create_output(
                            self.job, "Event Loss Table. type=%s, hazard=%s" %
                            (loss_type, hazard_output.id), "event_loss"),
                        loss_type=loss_type,
                        hazard_output=hazard_output)
                    inserter = writer.CacheInserter(models.EventLossData, 9999)

                    rupture_ids = models.SESRupture.objects.filter(
                        ses__ses_collection__lt_realization=hazard_output.
                        output_container.lt_realization).values_list('id',
                                                                     flat=True)

                    for rupture_id in rupture_ids:
                        if rupture_id in event_loss_table:
                            inserter.add(
                                models.EventLossData(
                                    event_loss_id=event_loss.id,
                                    rupture_id=rupture_id,
                                    aggregate_loss=event_loss_table[rupture_id]
                                ))
                    inserter.flush()

                    aggregate_losses = [
                        event_loss_table[rupture_id]
                        for rupture_id in rupture_ids
                        if rupture_id in event_loss_table
                    ]

                    if aggregate_losses:
                        aggregate_loss_losses, aggregate_loss_poes = (
                            scientific.event_based(
                                aggregate_losses,
                                tses=tses,
                                time_span=time_span,
                                curve_resolution=self.rc.loss_curve_resolution)
                        )

                        models.AggregateLossCurveData.objects.create(
                            loss_curve=models.LossCurve.objects.create(
                                aggregate=True,
                                insured=False,
                                hazard_output=hazard_output,
                                loss_type=loss_type,
                                output=models.Output.objects.create_output(
                                    self.job, "aggregate loss curves. "
                                    "loss_type=%s hazard=%s" %
                                    (loss_type, hazard_output),
                                    "agg_loss_curve")),
                            losses=aggregate_loss_losses,
                            poes=aggregate_loss_poes,
                            average_loss=scientific.average_loss(
                                aggregate_loss_losses, aggregate_loss_poes),
                            stddev_loss=numpy.std(aggregate_losses))
Code example #46
def exposure_statistics(
        loss_curves, map_poes, weights, quantiles, post_processing):
    """
    Compute exposure statistics for N assets and R realizations.

    :param loss_curves:
        a list with N loss curves data. Each item holds a 2-tuple with
        1) the loss ratios on which the curves are defined
        2) the poes of the R curves
    :param map_poes:
        a numpy array with P poes used to compute loss maps
    :param weights:
        a list of R weights (one per realization) used to compute
        mean/quantile weighted statistics
    :param quantiles:
        the quantile levels used to compute quantile results
    :param post_processing:
       a module providing #weighted_quantile_curve, #quantile_curve,
       #mean_curve

    :returns:
        a tuple with six elements:
            *) a numpy array with N mean loss curves
            *) a numpy array with N mean average losses
            *) a numpy array with P x N mean map values
            *) a numpy array with Q x N quantile loss curves
            *) a numpy array with Q x N quantile average loss values
            *) a numpy array with Q x P x N quantile map values
    """
    curve_resolution = len(loss_curves[0][0])
    map_nr = len(map_poes)

    # Collect per-asset statistic along the last dimension of the
    # following arrays
    mean_curves = numpy.zeros((0, 2, curve_resolution))
    mean_average_losses = numpy.array([])
    mean_maps = numpy.zeros((map_nr, 0))
    quantile_curves = numpy.zeros((len(quantiles), 0, 2, curve_resolution))
    quantile_average_losses = numpy.zeros((len(quantiles), 0,))
    quantile_maps = numpy.zeros((len(quantiles), map_nr, 0))

    for loss_ratios, curves_poes in loss_curves:
        _mean_curve, _mean_maps, _quantile_curves, _quantile_maps = (
            asset_statistics(
                loss_ratios, curves_poes,
                quantiles, weights, map_poes, post_processing))

        mean_curves = numpy.vstack(
            (mean_curves, _mean_curve[numpy.newaxis, :]))
        mean_average_losses = numpy.append(
            mean_average_losses, scientific.average_loss(*_mean_curve))

        mean_maps = numpy.hstack((mean_maps, _mean_maps[:, numpy.newaxis]))
        quantile_curves = numpy.hstack(
            (quantile_curves, _quantile_curves[:, numpy.newaxis]))

        _quantile_average_losses = numpy.array(
            [scientific.average_loss(losses, poes)
             for losses, poes in _quantile_curves])
        quantile_average_losses = numpy.hstack(
            (quantile_average_losses,
             _quantile_average_losses[:, numpy.newaxis]))
        quantile_maps = numpy.dstack(
            (quantile_maps, _quantile_maps[:, :, numpy.newaxis]))

    return (mean_curves, mean_average_losses, mean_maps,
            quantile_curves, quantile_average_losses, quantile_maps)
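
The stacking pattern is the part worth internalizing: each per-asset statistic is appended along the last axis, which is why the maps come out as P x N and Q x P x N. A shape-only sketch in pure numpy with invented sizes:

import numpy

P, Q, N = 3, 2, 4  # invented numbers of map poes, quantiles, assets
mean_maps = numpy.zeros((P, 0))
quantile_maps = numpy.zeros((Q, P, 0))
for _ in range(N):  # one hstack/dstack per asset, as in the loop above
    mean_maps = numpy.hstack((mean_maps, numpy.ones((P, 1))))
    quantile_maps = numpy.dstack((quantile_maps, numpy.ones((Q, P, 1))))
assert mean_maps.shape == (P, N)
assert quantile_maps.shape == (Q, P, N)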
Code example #47
File: core.py Project: ChristieHale/oq-engine
    def post_process(self):
        """
          Compute aggregate loss curves and event loss tables
        """
        oq = self.oqparam
        tses = oq.investigation_time * oq.ses_per_logic_tree_path
        with self.monitor('post processing', autoflush=True):
            inserter = writer.CacheInserter(models.EventLossData,
                                            max_cache_size=10000)
            for (loss_type, out_id), event_loss_table in self.acc.items():
                if out_id:  # values for individual realizations
                    hazard_output = models.Output.objects.get(pk=out_id)
                    event_loss = models.EventLoss.objects.get(
                        output__oq_job=self.job,
                        output__output_type='event_loss',
                        loss_type=loss_type, hazard_output=hazard_output)
                    if isinstance(hazard_output.output_container,
                                  models.SESCollection):
                        ses_coll = hazard_output.output_container
                        rupture_ids = ses_coll.get_ruptures().values_list(
                            'id', flat=True)
                    else:  # extract the SES collection from the Gmf
                        rupture_ids = models.SESRupture.objects.filter(
                            rupture__ses_collection__trt_model__lt_model=
                            hazard_output.output_container.
                            lt_realization.lt_model).values_list(
                            'id', flat=True)
                    for rupture_id in rupture_ids:
                        if rupture_id in event_loss_table:
                            inserter.add(
                                models.EventLossData(
                                    event_loss_id=event_loss.id,
                                    rupture_id=rupture_id,
                                    aggregate_loss=event_loss_table[
                                        rupture_id]))
                    inserter.flush()

                    aggregate_losses = [
                        event_loss_table[rupture_id]
                        for rupture_id in rupture_ids
                        if rupture_id in event_loss_table]

                    if aggregate_losses:
                        aggregate_loss = scientific.event_based(
                            aggregate_losses, tses=tses,
                            time_span=oq.investigation_time,
                            curve_resolution=oq.loss_curve_resolution)

                        models.AggregateLossCurveData.objects.create(
                            loss_curve=models.LossCurve.objects.create(
                                aggregate=True, insured=False,
                                hazard_output=hazard_output,
                                loss_type=loss_type,
                                output=models.Output.objects.create_output(
                                    self.job,
                                    "aggregate loss curves. "
                                    "loss_type=%s hazard=%s" % (
                                        loss_type, hazard_output),
                                    "agg_loss_curve")),
                            losses=aggregate_loss[0],
                            poes=aggregate_loss[1],
                            average_loss=scientific.average_loss(
                                aggregate_loss),
                            stddev_loss=numpy.std(aggregate_losses))
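
One caution when adapting these snippets: across the examples above `average_loss` is called both as `average_loss(losses, poes)` and with a single (losses, poes) pair, depending on the library version. A hedged compatibility wrapper (not part of oq-risklib) that tolerates both:

from openquake.risklib import scientific

def average_loss_compat(losses, poes=None):
    """Accept either a (losses, poes) pair or two separate arrays."""
    curve = losses if poes is None else (losses, poes)
    try:
        return scientific.average_loss(curve)   # pair-style API
    except TypeError:
        return scientific.average_loss(*curve)  # two-argument API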