Example #1
    def mean_loss_ratios_with_steps(self, steps):
        """
        Split the mean loss ratios, producing a new set of loss ratios.
        The new set of loss ratios always includes 0.0 and 1.0.

        :param int steps:
            the number of steps we make to go from one loss
            ratio to the next. For example, if we have [0.5, 0.7]::

             steps = 1 produces [0.0, 0.5, 0.7, 1]
             steps = 2 produces [0.0, 0.25, 0.5, 0.6, 0.7, 0.85, 1]
             steps = 3 produces [0.0, 0.17, 0.33, 0.5, 0.57, 0.63,
                                 0.7, 0.8, 0.9, 1]
        """
        loss_ratios = self.mean_loss_ratios

        min_lr = min(loss_ratios)
        max_lr = max(loss_ratios)

        if min_lr > 0.0:
            # prepend with a zero
            loss_ratios = numpy.concatenate([[0.0], loss_ratios])

        if max_lr < 1.0:
            # append a 1.0
            loss_ratios = numpy.concatenate([loss_ratios, [1.0]])

        ls = numpy.concatenate([numpy.linspace(x, y, num=steps + 1)[:-1]
                                for x, y in utils.pairwise(loss_ratios)])
        return numpy.concatenate([ls, [loss_ratios[-1]]])
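All of these examples revolve around utils.pairwise, which iterates over
the overlapping pairs of consecutive items in a sequence. A minimal
sketch, assuming the helper follows the standard itertools tee recipe
(see openquake.risklib.utils for the actual implementation):

import itertools

def pairwise(iterable):
    "s -> (s0, s1), (s1, s2), (s2, s3), ..."
    a, b = itertools.tee(iterable)
    next(b, None)
    return zip(a, b)

# list(pairwise([0.0, 0.5, 0.7, 1.0]))
# -> [(0.0, 0.5), (0.5, 0.7), (0.7, 1.0)]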
Example #2
File: core.py Project: kenxshao/oq-engine
def do_classical_bcr(loss_type, units, containers, params, profile):
    """
    Compute and write the benefit-cost ratio for each asset, pairing
    each original calculation unit with its retrofitted counterpart.
    """
    for unit_orig, unit_retro in utils.pairwise(units):
        with profile('getting hazard'):
            assets, hazard_curves = unit_orig.getter()
            _, hazard_curves_retrofitted = unit_retro.getter()

        with profile('computing bcr'):
            original_loss_curves = unit_orig.calc(hazard_curves)
            retrofitted_loss_curves = unit_retro.calc(
                hazard_curves_retrofitted)

            eal_original = [
                scientific.average_loss(losses, poes)
                for losses, poes in original_loss_curves]

            eal_retrofitted = [
                scientific.average_loss(losses, poes)
                for losses, poes in retrofitted_loss_curves]

            bcr_results = [
                scientific.bcr(
                    eal_original[i], eal_retrofitted[i],
                    params.interest_rate, params.asset_life_expectancy,
                    asset.value(loss_type), asset.retrofitted(loss_type))
                for i, asset in enumerate(assets)]

        with profile('writing results'):
            containers.write(
                assets, zip(eal_original, eal_retrofitted, bcr_results),
                output_type="bcr_distribution",
                loss_type=loss_type,
                hazard_output_id=unit_orig.getter.hazard_output.id)
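The scientific.bcr call above computes the classical benefit-cost ratio:
the reduction in expected annual loss, discounted over the asset's life
expectancy, divided by the retrofitting cost. A sketch under that
assumption (check openquake.risklib.scientific for the authoritative
implementation):

import numpy

def bcr(eal_original, eal_retrofitted, interest_rate,
        asset_life_expectancy, asset_value, retrofitting_cost):
    # assumed form: the benefit is the reduction in expected annual
    # loss, discounted over the asset life expectancy at the given
    # interest rate; the cost is the retrofitting cost
    return ((eal_original - eal_retrofitted) * asset_value
            * (1 - numpy.exp(-interest_rate * asset_life_expectancy))
            / (interest_rate * retrofitting_cost))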
Example #3
def fine_graining(points, steps):
    """
    :param points: a list of floats
    :param int steps: expansion steps (>= 2)

    >>> fine_graining([0, 1], steps=0)
    [0, 1]
    >>> fine_graining([0, 1], steps=1)
    [0, 1]
    >>> fine_graining([0, 1], steps=2)
    array([0. , 0.5, 1. ])
    >>> fine_graining([0, 1], steps=3)
    array([0.        , 0.33333333, 0.66666667, 1.        ])
    >>> fine_graining([0, 0.5, 0.7, 1], steps=2)
    array([0.  , 0.25, 0.5 , 0.6 , 0.7 , 0.85, 1.  ])

    N points become S * (N - 1) + 1 points with S > 0
    """
    if steps < 2:
        return points
    ls = numpy.concatenate([
        numpy.linspace(x, y, num=steps + 1)[:-1]
        for x, y in utils.pairwise(points)
    ])
    return numpy.concatenate([ls, [points[-1]]])
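A quick sanity check of the point-count formula from the docstring:
N = 4 points with S = 2 steps yield S * (N - 1) + 1 = 7 points, matching
the last doctest above:

points = [0, 0.5, 0.7, 1]
assert len(fine_graining(points, steps=2)) == 2 * (len(points) - 1) + 1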
Example #4
    def mean_imls(self):
        """
        Compute the mean IMLs (Intensity Measure Level)
        for the given vulnerability function.

        :param vulnerability_function: the vulnerability function where
            the IMLs (Intensity Measure Level) are taken from.
        :type vuln_function:
           :py:class:`openquake.risklib.vulnerability_function.\
           VulnerabilityFunction`
        """
        return ([max(0, self.imls[0] - ((self.imls[1] - self.imls[0]) / 2))] +
                [numpy.mean(pair) for pair in utils.pairwise(self.imls)] +
                [self.imls[-1] + ((self.imls[-1] - self.imls[-2]) / 2)])
Example #5
    def mean_imls(self):
        """
        Compute the mean IMLs (Intensity Measure Level)
        for the given vulnerability function.

        :param vulnerability_function: the vulnerability function where
            the IMLs (Intensity Measure Level) are taken from.
        :type vuln_function:
           :py:class:`openquake.risklib.vulnerability_function.\
           VulnerabilityFunction`
        """
        return numpy.array(
            [max(0, self.imls[0] - (self.imls[1] - self.imls[0]) / 2.)] +
            [numpy.mean(pair) for pair in utils.pairwise(self.imls)] +
            [self.imls[-1] + (self.imls[-1] - self.imls[-2]) / 2.])
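For N IMLs the method returns N + 1 points: a half-interval below the
first IML (clamped at zero), the midpoints of consecutive IMLs, and a
half-interval above the last one. A worked example with hypothetical
values:

# with self.imls = [0.1, 0.2, 0.4]:
#   first point: max(0, 0.1 - (0.2 - 0.1) / 2.) = 0.05
#   pairwise means: 0.15, 0.3
#   last point: 0.4 + (0.4 - 0.2) / 2. = 0.5
# mean_imls() -> array([0.05, 0.15, 0.3, 0.5])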
Example #6
    def test_init(self):
        assets_num = 100
        samples_num = 1000
        correlation = 0.37
        self.dist.init(assets_num, samples_num, correlation=correlation,
                       seed=17)

        tol = 0.1
        # spot-check the correlation structure on consecutive asset pairs
        for a1, a2 in utils.pairwise(range(assets_num)):
            coeffs = numpy.corrcoef(
                self.dist.epsilons[a1, :], self.dist.epsilons[a2, :])

            numpy.testing.assert_allclose([1, 1], [coeffs[0, 0], coeffs[1, 1]])
            numpy.testing.assert_allclose(
                correlation, coeffs[0, 1], rtol=0, atol=tol)
            numpy.testing.assert_allclose(
                correlation, coeffs[1, 0], rtol=0, atol=tol)
Example #7
    def test_init(self):
        assets_num = 100
        samples_num = 1000
        correlation = 0.37
        epsilons = scientific.make_epsilons(
            numpy.zeros((assets_num, samples_num)),
            seed=17, correlation=correlation)
        self.dist = scientific.LogNormalDistribution(epsilons)

        tol = 0.1
        for a1, a2 in utils.pairwise(range(assets_num)):
            coeffs = numpy.corrcoef(
                self.dist.epsilons[a1, :], self.dist.epsilons[a2, :])

            numpy.testing.assert_allclose([1, 1], [coeffs[0, 0], coeffs[1, 1]])
            numpy.testing.assert_allclose(
                correlation, coeffs[0, 1], rtol=0, atol=tol)
            numpy.testing.assert_allclose(
                correlation, coeffs[1, 0], rtol=0, atol=tol)
Example #8
def _evenly_spaced_loss_ratios(loss_ratios, steps, first=(), last=()):
    """
    Split the loss ratios, producing a new set of loss ratios.

    :param loss_ratios: the loss ratios to split.
    :type loss_ratios: list of floats
    :param int steps: the number of steps we make to go from one loss
        ratio to the next. For example, if we have [1.0, 2.0]::

        steps = 1 produces [1.0, 2.0]
        steps = 2 produces [1.0, 1.5, 2.0]
        steps = 3 produces [1.0, 1.33, 1.67, 2.0]
    :param first: optional array of ratios to put first (e.g. [0.0])
    :param last: optional array of ratios to put last (e.g. [1.0])
    """
    loss_ratios = numpy.concatenate([first, loss_ratios, last])
    ls = numpy.concatenate([numpy.linspace(x, y, num=steps + 1)[:-1]
                            for x, y in utils.pairwise(loss_ratios)])
    return numpy.concatenate([ls, [loss_ratios[-1]]])
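With first=[0.0] and last=[1.0] this helper reproduces the behaviour of
mean_loss_ratios_with_steps in Example #1. A usage sketch:

_evenly_spaced_loss_ratios([0.5, 0.7], steps=2, first=[0.0], last=[1.0])
# -> array([0.  , 0.25, 0.5 , 0.6 , 0.7 , 0.85, 1.  ])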
Example #9
File: core.py Project: kenxshao/oq-engine
def do_event_based_bcr(loss_type, units, containers, params, profile):
    """
    See `event_based_bcr` for docstring
    """
    for unit_orig, unit_retro in utils.pairwise(units):

        with profile("getting hazard"):
            assets, (gmvs, _) = unit_orig.getter()
            if len(assets) == 0:
                logs.LOG.info("Exit from task as no asset could be processed")
                return

            _, (gmvs_retro, _) = unit_retro.getter()

        with profile("computing bcr"):
            _, original_loss_curves = unit_orig.calc(gmvs)
            _, retrofitted_loss_curves = unit_retro.calc(gmvs_retro)

            eal_original = [
                scientific.average_loss(losses, poes)
                for losses, poes in original_loss_curves]

            eal_retrofitted = [
                scientific.average_loss(losses, poes)
                for losses, poes in retrofitted_loss_curves]

            bcr_results = [
                scientific.bcr(
                    eal_original[i],
                    eal_retrofitted[i],
                    params.interest_rate,
                    params.asset_life_expectancy,
                    asset.value(loss_type),
                    asset.retrofitted(loss_type),
                )
                for i, asset in enumerate(assets)
            ]

        with profile("writing results"):
            containers.write(
                assets,
                zip(eal_original, eal_retrofitted, bcr_results),
                output_type="bcr_distribution",
                loss_type=loss_type,
                hazard_output_id=unit_orig.getter.hazard_output.id,
            )
Example #10
def event_based(loss_values, tses, time_span,
                curve_resolution=DEFAULT_CURVE_RESOLUTION):
    """
    Compute a loss (or loss ratio) curve.

    :param loss_values: The loss ratios (or the losses) computed by
        applying the vulnerability function

    :param tses: Time representative of the stochastic event set

    :param time_span: Investigation Time spanned by the hazard input

    :param curve_resolution: The number of points the output curve is
        defined by
    """
    sorted_loss_values = numpy.sort(loss_values)[::-1]

    # We compute the rates of exceedance by iterating over the loss
    # values and counting the number of distinct loss values less than
    # the current loss. This is a workaround for a rounding error; ask
    # Luigi for the details
    times = [index
             for index, (previous_val, val) in
             enumerate(utils.pairwise(sorted_loss_values))
             if not numpy.allclose([val], [previous_val])]

    # if there are fewer than 2 distinct loss values, we keep the
    # endpoints
    if len(times) < 2:
        times = [0, len(sorted_loss_values) - 1]

    sorted_loss_values = sorted_loss_values[times]
    rates_of_exceedance = numpy.array(times) / float(tses)

    poes = 1 - numpy.exp(-rates_of_exceedance * time_span)
    reference_poes = numpy.linspace(poes.min(), poes.max(), curve_resolution)

    values = interpolate.interp1d(poes, sorted_loss_values)(reference_poes)

    return curve.Curve(zip(values, reference_poes))
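The conversion from rates of exceedance to probabilities of exceedance
assumes a Poissonian occurrence model, poe = 1 - exp(-rate * time_span).
A quick numeric check with illustrative numbers:

import numpy

rate = 0.01       # one exceedance per 100 years on average (hypothetical)
time_span = 50.0  # investigation time in years (hypothetical)
poe = 1 - numpy.exp(-rate * time_span)  # ~0.3935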
Example #11
def fine_graining(points, steps):
    """
    :param points: a list of floats
    :param int steps: expansion steps (>= 2)

    >>> fine_graining([0, 1], steps=0)
    [0, 1]
    >>> fine_graining([0, 1], steps=1)
    [0, 1]
    >>> fine_graining([0, 1], steps=2)
    array([0. , 0.5, 1. ])
    >>> fine_graining([0, 1], steps=3)
    array([0.        , 0.33333333, 0.66666667, 1.        ])
    >>> fine_graining([0, 0.5, 0.7, 1], steps=2)
    array([0.  , 0.25, 0.5 , 0.6 , 0.7 , 0.85, 1.  ])

    N points become S * (N - 1) + 1 points with S > 0
    """
    if steps < 2:
        return points
    ls = numpy.concatenate([numpy.linspace(x, y, num=steps + 1)[:-1]
                            for x, y in utils.pairwise(points)])
    return numpy.concatenate([ls, [points[-1]]])
Example #12
def pairwise_diff(values):
    "Differences between a value and the next value in a sequence"
    return numpy.array([x - y for x, y in utils.pairwise(values)])
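Note the sign convention: pairwise_diff subtracts the next value from
the current one, so on an increasing sequence the result is negative
(the sign-flipped counterpart of numpy.diff):

pairwise_diff([1.0, 2.0, 4.0])  # -> array([-1., -2.])
numpy.diff([1.0, 2.0, 4.0])     # -> array([1., 2.])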
Example #13
def pairwise_mean(values):
    "Averages between a value and the next value in a sequence"
    return numpy.array([numpy.mean(pair) for pair in utils.pairwise(values)])
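And a matching usage sketch for pairwise_mean, where each entry is the
midpoint of two consecutive values:

pairwise_mean([1.0, 2.0, 4.0])  # -> array([1.5, 3. ])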