Example #1
 def test_in_range(self):
     numpy.testing.assert_allclose(
         [0.2],
         scientific.insured_losses(numpy.array([0.3]), 0.1, 0.5))
     numpy.testing.assert_allclose(
         [0.2, 0.3],
         scientific.insured_losses(numpy.array([0.3, 0.4]), 0.1, 0.5))
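Taken together with Examples #2 and #3 below, these tests pin down the contract of scientific.insured_losses(losses, deductible, limit): a loss below the deductible pays nothing, a loss above the limit pays limit - deductible, and anything in between pays loss - deductible. A minimal sketch of that behaviour, assuming only numpy (this is a re-implementation inferred from the tests, not OpenQuake's actual code):

    import numpy

    def insured_losses_sketch(losses, deductible, limit):
        # clip each loss into [deductible, limit], then subtract the
        # deductible; equivalent to the piecewise contract above
        return numpy.clip(losses, deductible, limit) - deductible

    # reproduces the expected values of the tests in Examples #1-#3
    assert numpy.allclose(insured_losses_sketch(numpy.array([0.3]), 0.1, 0.5), [0.2])
    assert numpy.allclose(insured_losses_sketch(numpy.array([0.05]), 0.1, 1), [0])
    assert numpy.allclose(insured_losses_sketch(numpy.array([0.6]), 0.1, 0.5), [0.4])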
Example #2
 def test_below_deductible(self):
     numpy.testing.assert_allclose([0],
                                   scientific.insured_losses(
                                       numpy.array([0.05]), 0.1, 1))
     numpy.testing.assert_allclose([0, 0],
                                   scientific.insured_losses(
                                       numpy.array([0.05, 0.1]), 0.1, 1))
Example #3
 def test_above_limit(self):
     numpy.testing.assert_allclose([0.4],
                                   scientific.insured_losses(
                                       numpy.array([0.6]), 0.1, 0.5))
     numpy.testing.assert_allclose([0.4, 0.4],
                                   scientific.insured_losses(
                                       numpy.array([0.6, 0.7]), 0.1, 0.5))
Example #4
 def test_in_range(self):
     numpy.testing.assert_allclose([0.2],
                                   scientific.insured_losses(
                                       numpy.array([0.3]), 0.1, 0.5))
     numpy.testing.assert_allclose([0.2, 0.3],
                                   scientific.insured_losses(
                                       numpy.array([0.3, 0.4]), 0.1, 0.5))
Example #5
 def test_above_limit(self):
     numpy.testing.assert_allclose(
         [0.4],
         scientific.insured_losses(numpy.array([0.6]), 0.1, 0.5))
     numpy.testing.assert_allclose(
         [0.4, 0.4],
         scientific.insured_losses(numpy.array([0.6, 0.7]), 0.1, 0.5))
Example #6
 def test_below_deductible(self):
     numpy.testing.assert_allclose(
         [0],
         scientific.insured_losses(numpy.array([0.05]), 0.1, 1))
     numpy.testing.assert_allclose(
         [0, 0],
         scientific.insured_losses(numpy.array([0.05, 0.1]), 0.1, 1))
Example #7
 def test_mean(self):
     losses1 = numpy.array([0.05, 0.2, 0.6])
     losses2 = numpy.array([0.01, 0.1, 0.3, 0.55])
     l1 = len(losses1)
     l2 = len(losses2)
     m1 = scientific.insured_losses(losses1, 0.1, 0.5).mean()
     m2 = scientific.insured_losses(losses2, 0.1, 0.5).mean()
     m = scientific.insured_losses(numpy.concatenate([losses1, losses2]),
                                   0.1, 0.5).mean()
     numpy.testing.assert_allclose((m1 * l1 + m2 * l2) / (l1 + l2), m)
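The final assertion holds because the mean of a concatenated array is the size-weighted mean of its parts, and insured_losses acts elementwise, so insuring the concatenation is the same as concatenating the insured parts. A self-contained check of the identity with plain numpy:

    import numpy

    a = numpy.array([0.05, 0.2, 0.6])
    b = numpy.array([0.01, 0.1, 0.3, 0.55])
    pooled = numpy.concatenate([a, b]).mean()
    weighted = (a.mean() * len(a) + b.mean() * len(b)) / (len(a) + len(b))
    assert numpy.isclose(pooled, weighted)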
Example #8
    def test_insured_loss_mean_based(self):
        vf = scientific.VulnerabilityFunction(
            [0.001, 0.2, 0.3, 0.5, 0.7],
            [0.01, 0.1, 0.2, 0.4, 0.8],
            [0.0, 0.0, 0.0, 0.0, 0.0],
            "LN")

        loss_ratios = scientific.vulnerability_function_applier(
            vf, gmf[0:2])

        values = [3000, 1000]
        insured_limits = [1250., 40.]
        deductibles = [40, 13]

        insured_average_losses = [
            scientific.average_loss(*scientific.event_based(
                scientific.insured_losses(
                    lrs,
                    deductibles[i] / values[i], insured_limits[i] / values[i]),
                50, 50, 20))
            for i, lrs in enumerate(loss_ratios)]

        numpy.testing.assert_allclose(
            [207.86489132 / 3000,   38.07815797 / 1000],
            insured_average_losses)
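Note that insured_losses here operates in loss-ratio space, so each asset's absolute deductible and insurance limit are divided by the asset value first. A tiny sketch of that normalization (the helper name is illustrative, not from OpenQuake):

    def to_ratio_space(value, deductible, limit):
        # convert an absolute deductible/limit pair into the loss-ratio
        # space expected by scientific.insured_losses in this example
        return deductible / value, limit / value

    assert to_ratio_space(3000.0, 40.0, 1250.0) == (40.0 / 3000.0, 1250.0 / 3000.0)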
Example #9
 def __call__(self, loss_type, assets, gmvs_eids, epsgetter):
     """
     :param str loss_type:
         the loss type considered
     :param assets:
        a list of assets on the same site and with the same taxonomy
     :param gmvs_eids:
        a pair (gmvs, eids) with E values each
     :param epsgetter:
        a callable returning the correct epsilons for the given gmvs
     :returns:
         a :class:`openquake.risklib.scientific.ProbabilisticEventBased.Output`
         instance.
     """
     gmvs, eids = gmvs_eids
     E = len(gmvs)
     I = self.insured_losses + 1
     A = len(assets)
     loss_ratios = numpy.zeros((A, E, I), F32)
     vf = self.risk_functions[loss_type]
     means, covs, idxs = vf.interpolate(gmvs)
     for i, asset in enumerate(assets):
         epsilons = epsgetter(asset.ordinal, eids)
         ratios = vf.sample(means, covs, idxs, epsilons)
         loss_ratios[i, idxs, 0] = ratios
         if self.insured_losses and loss_type != 'occupants':
             loss_ratios[i, idxs, 1] = scientific.insured_losses(
                 ratios, asset.deductible(loss_type),
                 asset.insurance_limit(loss_type))
     return loss_ratios
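The loss_ratios array has one row per asset, one column per event, and a last axis holding the ground-up ratios (index 0) and, when insured losses are enabled, the insured ratios (index 1); idxs, as returned by vf.interpolate, selects only the events for which the vulnerability function produced values. A minimal sketch of the indexing pattern, assuming F32 is an alias for numpy.float32 (as it is elsewhere in OpenQuake):

    import numpy

    F32 = numpy.float32  # assumed alias for the F32 used above

    A, E, I = 2, 5, 2                 # assets, events, ground-up + insured
    loss_ratios = numpy.zeros((A, E, I), F32)
    idxs = numpy.array([0, 2, 4])     # events the VF could interpolate
    ratios = numpy.array([0.10, 0.25, 0.40], F32)
    loss_ratios[0, idxs, 0] = ratios  # ground-up ratios for asset 0
    # index 1 on the last axis would receive the insured ratios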
Example #10
 def __call__(self, loss_type, assets, gmvs_eids, epsgetter):
     """
     :param str loss_type:
         the loss type considered
     :param assets:
        a list of assets on the same site and with the same taxonomy
     :param gmvs_eids:
        a composite array of E elements with fields 'gmv' and 'eid'
     :param epsgetter:
        a callable returning the correct epsilons for the given gmvs
     :returns:
         a :class:`openquake.risklib.scientific.ProbabilisticEventBased.Output`
         instance.
     """
     gmvs, eids = gmvs_eids['gmv'], gmvs_eids['eid']
     E = len(gmvs)
     I = self.insured_losses + 1
     N = len(assets)
     loss_ratios = numpy.zeros((N, E, I), F32)
     vf = self.risk_functions[loss_type]
     means, covs, idxs = vf.interpolate(gmvs)
     for i, asset in enumerate(assets):
         epsilons = epsgetter(asset.ordinal, eids)
         if epsilons is not None:
             ratios = vf.sample(means, covs, idxs, epsilons)
         else:
             ratios = means
         loss_ratios[i, idxs, 0] = ratios
         if self.insured_losses and loss_type != 'occupants':
             loss_ratios[i, idxs, 1] = scientific.insured_losses(
                  ratios, asset.deductible(loss_type),
                 asset.insurance_limit(loss_type))
     return scientific.Output(
         assets, loss_type, loss_ratios=loss_ratios, eids=eids)
Example #11
def do_scenario(loss_type, unit, containers, params, profile):
    """
    See `scenario` for a description of the input parameters
    """

    with profile('getting hazard'):
        assets, ground_motion_values = unit.getter()

    if not len(assets):
        logs.LOG.info("Exit from task as no asset could be processed")
        return None, None

    with profile('computing risk'):
        loss_ratio_matrix = unit.calc(ground_motion_values)

        if params.insured_losses:
            insured_loss_matrix = [
                scientific.insured_losses(
                    loss_ratio_matrix[i], asset.value(loss_type),
                    asset.deductible(loss_type),
                    asset.insurance_limit(loss_type))
                for i, asset in enumerate(assets)]

    with profile('saving risk outputs'):
        containers.write(
            assets,
            [losses.mean() for losses in loss_ratio_matrix],
            [losses.std(ddof=1) for losses in loss_ratio_matrix],
            output_type="loss_map",
            loss_type=loss_type,
            hazard_output_id=unit.getter.hazard_output.id,
            insured=False)

        if params.insured_losses:
            containers.write(
                assets,
                [losses.mean() for losses in insured_loss_matrix],
                [losses.std(ddof=1) for losses in insured_loss_matrix],
                itertools.cycle([True]),
                output_type="loss_map",
                loss_type=loss_type,
                hazard_output_id=unit.getter.hazard_output.id,
                insured=True)

    aggregate_losses = sum(loss_ratio_matrix[i] * asset.value(loss_type)
                           for i, asset in enumerate(assets))

    if params.insured_losses:
        insured_losses = (
            numpy.array(insured_loss_matrix).transpose().sum(axis=1))
    else:
        insured_losses = "Not computed"

    return aggregate_losses, insured_losses
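The aggregate is a per-event array: each loss_ratio_matrix[i] holds one loss ratio per ground-motion realization, so scaling by the asset value and summing over assets yields total losses per event. A toy illustration with two assets and two events (values chosen for this sketch only):

    import numpy

    loss_ratio_matrix = [numpy.array([0.1, 0.2]), numpy.array([0.05, 0.3])]
    values = [3000.0, 1000.0]
    aggregate = sum(lr * v for lr, v in zip(loss_ratio_matrix, values))
    # per-event totals: [0.1*3000 + 0.05*1000, 0.2*3000 + 0.3*1000]
    assert numpy.allclose(aggregate, [350.0, 900.0])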
Example #12
def insured_losses(loss_type, unit, assets, loss_ratio_matrix):
    for asset, losses in zip(assets, loss_ratio_matrix):
        asset_insured_losses, poes = scientific.event_based(
            scientific.insured_losses(
                losses,
                asset.value(loss_type),
                asset.deductible(loss_type),
                asset.insurance_limit(loss_type)),
            tses=unit.calc.tses,
            time_span=unit.calc.time_span)
        # FIXME(lp). Insured losses are still computed as absolute
        # values.
        yield asset_insured_losses / asset.value(loss_type), poes
Example #13
    def __call__(self, loss_type, assets, ground_motion_values, epsilons,
                 eids):
        """
        :param str loss_type: the loss type considered

        :param assets:
           a list with a single asset

        :param ground_motion_values:
           an array of E ground_motion_values

        :param epsilons:
           a list with a single array of E stochastic values

        :param eids:
           a numpy array of E rupture IDs

        :returns:
            a :class:`openquake.risklib.scientific.ProbabilisticEventBased.Output`
            instance.
        """
        E = len(eids)
        I = self.insured_losses + 1
        loss_ratios = numpy.zeros((E, I), F32)
        asset = assets[0]  # the only one
        loss_ratios[:, 0] = ratios = self.risk_functions[loss_type].apply_to(
            [ground_motion_values], epsilons)[0]  # shape E
        cb = self.compositemodel.curve_builders[
            self.compositemodel.lti[loss_type]]
        if self.insured_losses and loss_type != 'occupants':
            deductible = asset.deductible(loss_type)
            limit = asset.insurance_limit(loss_type)
            ilm = scientific.insured_losses(ratios, deductible, limit)
            loss_ratios[:, 1] = ilm
            icounts = cb.build_counts(ilm)
        else:
            # FIXME: ugly workaround for qa_tests.event_based_test; in Ubuntu
            # 12.04 MagicMock does not work well, so len(cb.ratios) gives error
            nratios = 1 if isinstance(cb, mock.Mock) else len(cb.ratios)
            icounts = numpy.empty(nratios)
            icounts.fill(numpy.nan)
        return scientific.Output(assets,
                                 loss_type,
                                 losses=loss_ratios * asset.value(loss_type),
                                 average_loss=loss_ratios.sum(axis=0) *
                                 self.ses_ratio,
                                 counts_matrix=cb.build_counts(loss_ratios),
                                 insured_counts_matrix=icounts,
                                 eids=eids)
Example #14
    def test_insured_loss_mean_based(self):
        vf = scientific.VulnerabilityFunction(
            'VF', 'PGA',
            [0.001, 0.2, 0.3, 0.5, 0.7],
            [0.01, 0.1, 0.2, 0.4, 0.8],
            [0.0, 0.0, 0.0, 0.0, 0.0])

        epsilons = scientific.make_epsilons(gmf[0:2], seed=1, correlation=0)
        loss_ratios = vf.apply_to(gmf[0:2], epsilons)

        values = [3000, 1000]
        insured_limits = [1250., 40.]
        deductibles = [40, 13]

        insured_average_losses = [
            scientific.average_loss(scientific.event_based(
                scientific.insured_losses(
                    lrs,
                    deductibles[i] / values[i], insured_limits[i] / values[i]),
                50, 50, 20))
            for i, lrs in enumerate(loss_ratios)]

        numpy.testing.assert_allclose(
            [207.86489132 / 3000,   38.07815797 / 1000],
            insured_average_losses)

        wf = workflows.ProbabilisticEventBased(
            'PGA', 'SOME-TAXONOMY',
            vulnerability_functions={self.loss_type: vf},
            risk_investigation_time=50,
            hazard_investigation_time=50,
            ses_per_logic_tree_path=200,
            number_of_logic_tree_samples=0,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=True
            )
        out = wf(self.loss_type, assets, gmf[0:2], epsilons, [1, 2, 3, 4, 5])
        self.assert_similar(
            out.event_loss_table,
            {1: 0.20314761658291458,
             2: 0,
             3: 0,
             4: 0,
             5: 0,
             })
Example #15
    def test_insured_loss_mean_based(self):
        vf = scientific.VulnerabilityFunction('VF', 'PGA',
                                              [0.001, 0.2, 0.3, 0.5, 0.7],
                                              [0.01, 0.1, 0.2, 0.4, 0.8],
                                              [0.0, 0.0, 0.0, 0.0, 0.0])

        epsilons = scientific.make_epsilons(gmf[0:2], seed=1, correlation=0)
        loss_ratios = vf.apply_to(gmf[0:2], epsilons)

        values = [3000., 1000.]
        insured_limits = [1250., 40.]
        deductibles = [40., 13.]

        insured_average_losses = [
            scientific.average_loss(
                scientific.event_based(
                    scientific.insured_losses(lrs, deductibles[i] / values[i],
                                              insured_limits[i] / values[i]),
                    50, 50, 20)) for i, lrs in enumerate(loss_ratios)
        ]
        numpy.testing.assert_allclose([0.05667045, 0.02542965],
                                      insured_average_losses)

        wf = workflows.ProbabilisticEventBased(
            'PGA',
            'SOME-TAXONOMY',
            vulnerability_functions={self.loss_type: vf},
            investigation_time=50,
            risk_investigation_time=50,
            ses_per_logic_tree_path=200,
            number_of_logic_tree_samples=0,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=True)
        out = wf(self.loss_type, assets, gmf[0:2], epsilons, [1, 2, 3, 4, 5])
        self.assert_similar(out.event_loss_table, {
            1: 0.20314761658291458,
            2: 0,
            3: 0,
            4: 0,
            5: 0,
        })
Example #16
    def test_insured_loss_mean_based(self):
        vf = scientific.VulnerabilityFunction(
            'VF', 'PGA',
            [0.001, 0.2, 0.3, 0.5, 0.7],
            [0.01, 0.1, 0.2, 0.4, 0.8],
            [0.0, 0.0, 0.0, 0.0, 0.0])

        epsilons = scientific.make_epsilons(gmf[0:2], seed=1, correlation=0)
        loss_ratios = vf.apply_to(gmf[0:2], epsilons)

        values = [3000., 1000.]
        insured_limits = [1250., 40.]
        deductibles = [40., 13.]

        insured_average_losses = [
            scientific.average_loss(scientific.event_based(
                scientific.insured_losses(
                    lrs,
                    deductibles[i] / values[i], insured_limits[i] / values[i]),
                1, 20))
            for i, lrs in enumerate(loss_ratios)]
        numpy.testing.assert_allclose([0.05667045, 0.02542965],
                                      insured_average_losses)

        wf = workflows.ProbabilisticEventBased(
            'PGA', 'SOME-TAXONOMY',
            vulnerability_functions={self.loss_type: vf},
            investigation_time=50,
            risk_investigation_time=50,
            ses_per_logic_tree_path=200,
            number_of_logic_tree_samples=0,
            loss_curve_resolution=4,
            conditional_loss_poes=[0.1, 0.5, 0.9],
            insured_losses=True
            )
        wf.riskmodel = mock.MagicMock()
        out = wf(self.loss_type, assets, gmf[0:2], epsilons, [1, 2, 3, 4, 5])
        numpy.testing.assert_almost_equal(
            out.average_losses, [0.00473820568, 0.0047437959417])
        numpy.testing.assert_almost_equal(
            out.average_insured_losses, [0, 0])
Example #17
    def test_insured_loss_mean_based(self):
        vulnerability_function_rm = (
            scientific.VulnerabilityFunction(
                [0.001, 0.2, 0.3, 0.5, 0.7], [0.01, 0.1, 0.2, 0.4, 0.8],
                [0.0, 0.0, 0.0, 0.0, 0.0], "LN"))

        vulnerability_function_rc = (
            scientific.VulnerabilityFunction(
                [0.001, 0.2, 0.3, 0.5, 0.7], [0.0035, 0.07, 0.14, 0.28, 0.56],
                [0.0, 0.0, 0.0, 0.0, 0.0], "LN"))

        calculator_rm = api.ProbabilisticEventBased(
            vulnerability_function_rm, time_span=50, tses=50,
            curve_resolution=20)

        calculator_rc = api.ProbabilisticEventBased(
            vulnerability_function_rc, time_span=50, tses=50,
            curve_resolution=20)

        loss_ratios_rm, _curves_rm = calculator_rm(gmf[0:2])
        loss_ratios_rc, [_curve_rc] = calculator_rc([gmf[2]])

        values = [3000, 1000, 2000]
        insured_limits = [1250., 40., 500.]
        deductibles = [40, 13, 15]

        insured_losses = [scientific.event_based(
            scientific.insured_losses(
                loss_ratios, values[i], deductibles[i], insured_limits[i]),
            50, 50, 20)
            for i, loss_ratios in enumerate(loss_ratios_rm + loss_ratios_rc)]

        for i, insured_loss_curve in enumerate(insured_losses):
            numpy.testing.assert_allclose(
                il.expected_poes[i], insured_loss_curve.ordinates, rtol=10E-5)

            numpy.testing.assert_allclose(
                il.expected_losses[i],
                insured_loss_curve.abscissae, rtol=10E-5)
Example #18
 def test_mixed(self):
     numpy.testing.assert_allclose([0, 0.1, 0.4],
                                   scientific.insured_losses(
                                       numpy.array([0.05, 0.2, 0.6]), 0.1,
                                       0.5))
Example #19
def scenario(job_id, hazard, seed, vulnerability_function, output_containers,
             insured_losses, asset_correlation):
    """
    Celery task for the scenario risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmfscenario') to a tuple where the first element is an instance of
      :class:`..hazard_getters.GroundMotionScenarioGetter`, and the second
      element is the corresponding weight.
    :param seed: the seed used to initialize the rng
    :param output_containers: a dictionary {hazard_id: output_id}
        where output id represents the id of the loss map
    :param bool insured_losses: True if insured losses should also be computed
    :param asset_correlation: asset correlation coefficient
    """

    calc = api.Scenario(vulnerability_function, seed, asset_correlation)

    hazard_getter = hazard.values()[0][0]

    with EnginePerformanceMonitor('hazard_getter', job_id, scenario):
        assets, ground_motion_values, missings = hazard_getter()

    if not len(assets):
        logs.LOG.info("Exit from task as no asset could be processed")
        base.signal_task_complete(job_id=job_id,
                                  aggregate_losses=None,
                                  insured_aggregate_losses=None,
                                  num_items=len(missings))
        return

    with logs.tracing('computing risk'):
        loss_ratio_matrix = calc(ground_motion_values)

        if insured_losses:
            insured_loss_matrix = [
                scientific.insured_losses(
                    loss_ratio_matrix[i], asset.value,
                    asset.deductible, asset.ins_limit)
                for i, asset in enumerate(assets)]

    # There is only one output container list, as there is no support
    # for hazard logic trees
    output_containers = output_containers.values()[0]

    loss_map_id = output_containers[0]

    if insured_losses:
        insured_loss_map_id = output_containers[1]

    with db.transaction.commit_on_success(using='reslt_writer'):
        for i, asset in enumerate(assets):
            general.write_loss_map_data(
                loss_map_id, asset,
                loss_ratio_matrix[i].mean(),
                std_dev=loss_ratio_matrix[i].std(ddof=1))

            if insured_losses:
                general.write_loss_map_data(
                    insured_loss_map_id, asset,
                    insured_loss_matrix[i].mean() / asset.value,
                    std_dev=insured_loss_matrix[i].std(ddof=1) / asset.value)

    aggregate_losses = sum(loss_ratio_matrix[i] * asset.value
                           for i, asset in enumerate(assets))

    if insured_losses:
        insured_aggregate_losses = (
            numpy.array(insured_loss_matrix).transpose().sum(axis=1))
    else:
        insured_aggregate_losses = "Not computed"

    base.signal_task_complete(
        job_id=job_id,
        num_items=len(assets) + len(missings),
        aggregate_losses=aggregate_losses,
        insured_aggregate_losses=insured_aggregate_losses)
Example #20
def event_based_risk(riskinputs, riskmodel, param, monitor):
    """
    :param riskinputs:
        :class:`openquake.risklib.riskinput.RiskInput` objects
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
    :param param:
        a dictionary of parameters
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    :returns:
        a dictionary of numpy arrays of shape (L, R)
    """
    I = param['insured_losses'] + 1
    L = len(riskmodel.lti)
    for ri in riskinputs:
        with monitor('getting hazard'):
            ri.hazard_getter.init()
            hazard = ri.hazard_getter.get_hazard()
        mon = monitor('build risk curves', measuremem=False)
        A = len(ri.aids)
        R = ri.hazard_getter.num_rlzs
        try:
            avg = numpy.zeros((A, R, L * I), F32)
        except MemoryError:
            raise MemoryError('Building array avg of shape (%d, %d, %d)' %
                              (A, R, L * I))
        result = dict(aids=ri.aids, avglosses=avg)
        acc = AccumDict()  # accumulator eidx -> agglosses
        aid2idx = {aid: idx for idx, aid in enumerate(ri.aids)}
        if 'builder' in param:
            builder = param['builder']
            P = len(builder.return_periods)
            all_curves = numpy.zeros((A, R, P), builder.loss_dt)
        # update the result dictionary and the agg array with each output
        for out in riskmodel.gen_outputs(ri, monitor, hazard):
            if len(out.eids) == 0:  # this happens for sites with no events
                continue
            r = out.rlzi
            agglosses = numpy.zeros((len(out.eids), L * I), F32)
            for l, loss_ratios in enumerate(out):
                if loss_ratios is None:  # for GMFs below the minimum_intensity
                    continue
                loss_type = riskmodel.loss_types[l]
                ins = param['insured_losses'] and loss_type != 'occupants'
                for a, asset in enumerate(out.assets):
                    aval = asset.value(loss_type)
                    aid = asset.ordinal
                    idx = aid2idx[aid]
                    ratios = loss_ratios[a]  # length E

                    # average losses
                    avg[idx, r,
                        l] = (ratios.sum(axis=0) * param['ses_ratio'] * aval)

                    # agglosses
                    agglosses[:, l] += ratios * aval

                    if ins:
                        iratios = scientific.insured_losses(
                            ratios, asset.deductible(loss_type),
                            asset.insurance_limit(loss_type))
                        avg[idx, r, l + L] = (iratios.sum(axis=0) *
                                              param['ses_ratio'] * aval)
                        agglosses[:, l + L] += iratios * aval
                    if 'builder' in param:
                        with mon:  # this is the heaviest part
                            all_curves[idx,
                                       r][loss_type] = (builder.build_curve(
                                           aval, ratios, r))
                            if ins:
                                lt = loss_type + '_ins'
                                all_curves[idx, r][lt] = builder.build_curve(
                                    aval, iratios, r)

            # NB: I could yield the agglosses per output, but then I would
            # have millions of small outputs with big data transfer and slow
            # saving time
            acc += dict(zip(out.eids, agglosses))

        if 'builder' in param:
            clp = param['conditional_loss_poes']
            result['curves-rlzs'], result['curves-stats'] = builder.pair(
                all_curves, param['stats'])
            if R > 1 and param['individual_curves'] is False:
                del result['curves-rlzs']
            if clp:
                result['loss_maps-rlzs'], result['loss_maps-stats'] = (
                    builder.build_maps(all_curves, clp, param['stats']))
                if R > 1 and param['individual_curves'] is False:
                    del result['loss_maps-rlzs']

        # store info about the GMFs, must be done at the end
        result['agglosses'] = (numpy.array(list(acc)),
                               numpy.array(list(acc.values())))
        yield result
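The accumulator maps each event ID to its aggregated losses across outputs and is serialized at the end as two parallel arrays. A stripped-down sketch of that pattern, with a plain dict standing in for openquake's AccumDict (an assumption made for illustration):

    import numpy

    acc = {}  # eid -> aggregated losses
    outputs = [(numpy.array([10, 11]), numpy.array([[1.0], [2.0]])),
               (numpy.array([11, 12]), numpy.array([[0.5], [3.0]]))]
    for eids, agglosses in outputs:
        for eid, losses in zip(eids, agglosses):
            acc[eid] = acc.get(eid, 0) + losses

    eid_arr = numpy.array(list(acc))            # event IDs
    loss_arr = numpy.array(list(acc.values()))  # summed losses per event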
Example #21
 def test_mixed(self):
     numpy.testing.assert_allclose(
         [0, 0.1, 0.4],
         scientific.insured_losses(numpy.array([0.05, 0.2, 0.6]), 0.1, 0.5))
Example #22
def event_based(job_id, hazard,
                seed, vulnerability_function,
                output_containers,
                conditional_loss_poes, insured_losses,
                time_span, tses,
                loss_curve_resolution, asset_correlation,
                hazard_montecarlo_p):
    """
    Celery task for the event based risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmf_collection') to a tuple where the first element is an
      instance of
      :class:`..hazard_getters.GroundMotionValuesGetter`,
      and the second element is the corresponding weight.
    :param seed:
      the seed used to initialize the rng
    :param dict output_containers: a dictionary mapping hazard Output
      ID to a list (a, b, c, d) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used to
      store the computed loss curves; b is the dictionary poe->ID of
      the :class:`openquake.engine.db.models.LossMap` output container used
      to store the computed loss maps; c is the same as a but for
      insured losses; d is the ID of the
      :class:`openquake.engine.db.models.AggregateLossCurve` output container
      used to store the computed loss curves
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool insured_losses: True if insured losses should be computed
    :param time_span: the time span considered
    :param tses: time of the stochastic event set
    :param loss_curve_resolution: the curve resolution, i.e. the
        number of points that define the loss curves
    :param float asset_correlation: a number ranging from 0 to 1
        representing the correlation between the generated loss ratios
    """

    loss_ratio_curves = OrderedDict()
    event_loss_table = dict()

    for hazard_output_id, hazard_data in hazard.items():
        hazard_getter, _ = hazard_data

        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids,
         insured_curve_id, aggregate_loss_curve_id) = (
             output_containers[hazard_output_id])

        # FIXME(lp). We should not pass the exact same seed for
        # different hazard
        calculator = api.ProbabilisticEventBased(
            vulnerability_function,
            curve_resolution=loss_curve_resolution,
            time_span=time_span,
            tses=tses,
            seed=seed,
            correlation=asset_correlation)

        with logs.tracing('getting input data from db'):
            assets, gmvs_ruptures, missings = hazard_getter()

        if len(assets):
            ground_motion_values = numpy.array(gmvs_ruptures)[:, 0]
            rupture_id_matrix = numpy.array(gmvs_ruptures)[:, 1]
        else:
            # we are relying on the fact that the hazard getters in this
            # task either all return some results or all return an empty
            # result set.
            logs.LOG.info("Exit from task as no asset could be processed")
            base.signal_task_complete(
                job_id=job_id,
                event_loss_table=dict(),
                num_items=len(missings))
            return

        with logs.tracing('computing risk'):
            loss_ratio_matrix, loss_ratio_curves[hazard_output_id] = (
                calculator(ground_motion_values))

        with logs.tracing('writing results'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, loss_ratio_curve in enumerate(
                        loss_ratio_curves[hazard_output_id]):
                    asset = assets[i]

                    # loss curves
                    general.write_loss_curve(
                        loss_curve_id, asset, loss_ratio_curve)

                    # loss maps
                    for poe in conditional_loss_poes:
                        general.write_loss_map_data(
                            loss_map_ids[poe], asset,
                            scientific.conditional_loss_ratio(
                                loss_ratio_curve, poe))

                    # insured losses
                    if insured_losses:
                        insured_loss_curve = scientific.event_based(
                            scientific.insured_losses(
                                loss_ratio_matrix[i],
                                asset.value,
                                asset.deductible,
                                asset.ins_limit),
                            tses,
                            time_span,
                            loss_curve_resolution)

                        insured_loss_curve.abscissae = (
                            insured_loss_curve.abscissae / asset.value)
                        general.write_loss_curve(
                            insured_curve_id, asset, insured_loss_curve)

                # update the event loss table of this task
                for i, asset in enumerate(assets):
                    for j, rupture_id in enumerate(rupture_id_matrix[i]):
                        loss = loss_ratio_matrix[i][j] * asset.value
                        event_loss_table[rupture_id] = (
                            event_loss_table.get(rupture_id, 0) + loss)

                # update the aggregate losses
                aggregate_losses = sum(
                    loss_ratio_matrix[i] * asset.value
                    for i, asset in enumerate(assets))
                general.update_aggregate_losses(
                    aggregate_loss_curve_id, aggregate_losses)

    # compute mean and quantile loss curves if multiple hazard
    # realizations are computed
    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                loss_ratio_curve_matrix = loss_ratio_curves.values()

                # here we are relying on the fact that assets do not
                # change across different logic tree realizations (as
                # the hazard grid does not change, the hazard getters
                # always return the same assets)
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        loss_ratio_curve_matrix[i],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="image")

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings),
                              event_loss_table=event_loss_table)