Example #1
def event_based(job_id, assets, hazard_getter_name, hazard,
                seed, vulnerability_function,
                output_containers,
                conditional_loss_poes, insured_losses,
                imt, time_span, tses,
                loss_curve_resolution, asset_correlation,
                hazard_montecarlo_p):
    """
    Celery task for the event based risk calculator.

    :param job_id: the ID of the current
        :class:`openquake.engine.db.models.OqJob`
    :param assets: the list of
        :class:`openquake.risklib.scientific.Asset` instances considered
    :param str hazard_getter_name: name of a class defined in the
      :mod:`openquake.engine.calculators.risk.hazard_getters` module, to
      be instantiated to get the hazard curves
    :param dict hazard:
      A dictionary mapping hazard Output ID to a tuple
      (GmfCollection ID, weight)
    :param seed: the seed used to initialize the rng
    :param vulnerability_function:
      the vulnerability function used to compute the loss ratios
    :param dict output_containers: a dictionary mapping hazard Output
      ID to a list (a, b, c, d, e, f) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used
      to store the computed loss curves; b is a dictionary mapping each
      poe to the ID of the :class:`openquake.engine.db.models.LossMap`
      output container used to store the computed loss maps; c and d are
      the IDs of the mean and quantile loss curve containers; e is the
      same as a but for insured losses; f is the ID of the
      :class:`openquake.engine.db.models.AggregateLossCurve` output
      container used to store the computed aggregate loss curve
    :param conditional_loss_poes:
      the poes taken into account to compute the loss maps
    :param bool insured_losses: True if insured losses should be computed
    :param str imt: the intensity measure type used to filter the
      ground motion fields
    :param time_span: the time span considered
    :param tses: the time representative of the stochastic event set
    :param loss_curve_resolution: the curve resolution, i.e. the
      number of points defining the loss curves
    :param float asset_correlation: a number in the range [0, 1]
      representing the correlation between the generated loss ratios
    :param bool hazard_montecarlo_p:
      (meaningful only if curve statistics are computed) whether or not
      the hazard calculation is Monte Carlo based
    """

    asset_outputs = OrderedDict()
    for hazard_output_id, hazard_data in hazard.items():
        hazard_id, _ = hazard_data

        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids,
         insured_curve_id, aggregate_loss_curve_id) = (
             output_containers[hazard_output_id])

        hazard_getter = general.hazard_getter(
            hazard_getter_name, hazard_id, imt)

        calculator = api.ProbabilisticEventBased(
            vulnerability_function,
            curve_resolution=loss_curve_resolution,
            time_span=time_span,
            tses=tses,
            seed=seed,
            correlation=asset_correlation)

        if insured_losses:
            calculator = api.InsuredLosses(calculator)

        # if we need to compute the loss maps, we add the proper risk
        # aggregator
        if conditional_loss_poes:
            calculator = api.ConditionalLosses(
                conditional_loss_poes, calculator)
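
        # InsuredLosses and ConditionalLosses act as decorators here:
        # each wraps the previous calculator and enriches the per-asset
        # output it returns.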

        with logs.tracing('getting hazard'):
            ground_motion_fields = [hazard_getter(asset.site)
                                    for asset in assets]

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs[hazard_output_id] = calculator(
                assets, ground_motion_fields)

        with logs.tracing('writing results'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(
                        asset_outputs[hazard_output_id]):
                    general.write_loss_curve(
                        loss_curve_id, assets[i], asset_output)

                    if asset_output.conditional_losses:
                        general.write_loss_map(
                            loss_map_ids, assets[i], asset_output)

                    if asset_output.insured_losses:
                        general.write_loss_curve(
                            insured_curve_id, assets[i], asset_output)
                losses = sum(asset_output.losses
                             for asset_output
                             in asset_outputs[hazard_output_id])
                general.update_aggregate_losses(
                    aggregate_loss_curve_id, losses)

    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        [asset_output[i].loss_ratio_curve
                         for asset_output in asset_outputs.values()],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="image")

    base.signal_task_complete(job_id=job_id, num_items=len(assets))
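
The curve-statistics loop above transposes asset_outputs, which is keyed
by hazard output: for each asset i it collects one loss ratio curve per
hazard output. A minimal standalone sketch of that transpose, using
placeholder strings instead of real curve objects:

from collections import OrderedDict

asset_outputs = OrderedDict([
    ('output_1', ['curve_a1', 'curve_a2']),  # one curve per asset
    ('output_2', ['curve_b1', 'curve_b2']),
])

# one row per asset, one column per hazard output
per_asset = list(zip(*asset_outputs.values()))
assert per_asset[0] == ('curve_a1', 'curve_b1')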
Example #2
def event_based(job_id, hazard,
                seed, vulnerability_function,
                output_containers,
                conditional_loss_poes, insured_losses,
                time_span, tses,
                loss_curve_resolution, asset_correlation,
                hazard_montecarlo_p):
    """
    Celery task for the event based risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmf_collection') to a tuple where the first element is an
      instance of
      :class:`..hazard_getters.GroundMotionValuesGetter`,
      and the second element is the corresponding weight.
    :param seed:
      the seed used to initialize the rng
    :param vulnerability_function:
      the vulnerability function used to compute the loss ratios
    :param dict output_containers: a dictionary mapping hazard Output
      ID to a list (a, b, c, d, e, f) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used
      to store the computed loss curves; b is a dictionary mapping each
      poe to the ID of the :class:`openquake.engine.db.models.LossMap`
      output container used to store the computed loss maps; c and d are
      the IDs of the mean and quantile loss curve containers; e is the
      same as a but for insured losses; f is the ID of the
      :class:`openquake.engine.db.models.AggregateLossCurve` output
      container used to store the computed aggregate loss curve
    :param conditional_loss_poes:
      the poes taken into account to compute the loss maps
    :param bool insured_losses: True if insured losses should be computed
    :param time_span: the time span considered
    :param tses: the time representative of the stochastic event set
    :param loss_curve_resolution: the curve resolution, i.e. the
      number of points defining the loss curves
    :param float asset_correlation: a number in the range [0, 1]
      representing the correlation between the generated loss ratios
    :param bool hazard_montecarlo_p:
      (meaningful only if curve statistics are computed) whether or not
      the hazard calculation is Monte Carlo based
    """

    asset_outputs = OrderedDict()
    for hazard_output_id, hazard_data in hazard.items():
        hazard_getter, _ = hazard_data

        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids,
         insured_curve_id, aggregate_loss_curve_id) = (
             output_containers[hazard_output_id])

        # FIXME(lp). We should not pass the exact same seed for
        # different hazard outputs
        calculator = api.ProbabilisticEventBased(
            vulnerability_function,
            curve_resolution=loss_curve_resolution,
            time_span=time_span,
            tses=tses,
            seed=seed,
            correlation=asset_correlation)

        if insured_losses:
            calculator = api.InsuredLosses(calculator)

        # if we need to compute the loss maps, we add the proper risk
        # aggregator
        if conditional_loss_poes:
            calculator = api.ConditionalLosses(
                conditional_loss_poes, calculator)

        with logs.tracing('getting input data from db'):
            assets, ground_motion_values, missings = hazard_getter()

        with logs.tracing('computing risk'):
            asset_outputs[hazard_output_id] = calculator(
                assets, ground_motion_values)

        with logs.tracing('writing results'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(
                        asset_outputs[hazard_output_id]):
                    general.write_loss_curve(
                        loss_curve_id, assets[i], asset_output)

                    if asset_output.conditional_losses:
                        general.write_loss_map(
                            loss_map_ids, assets[i], asset_output)

                    if asset_output.insured_losses:
                        general.write_loss_curve(
                            insured_curve_id, assets[i], asset_output)
                losses = sum(asset_output.losses
                             for asset_output
                             in asset_outputs[hazard_output_id])
                general.update_aggregate_losses(
                    aggregate_loss_curve_id, losses)

    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        [asset_output[i].loss_ratio_curve
                         for asset_output in asset_outputs.values()],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="image")

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings))
Example #3
def classical(job_id, hazard, vulnerability_function, output_containers,
              lrem_steps_per_interval, conditional_loss_poes,
              hazard_montecarlo_p):
    """
    Celery task for the classical risk calculator.

    Instantiates risklib calculators, computes losses for the given
    assets and stores the results to db in a single transaction.

    :param int job_id:
      ID of the currently running job
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'hazard_curve') to a tuple where the first element is an instance of
      :class:`..hazard_getters.HazardCurveGetter`, and the second element is
      the corresponding weight.
    :param vulnerability_function:
      the vulnerability function used to compute the loss ratios
    :param dict output_containers: A dictionary mapping hazard
      Output ID to a tuple (a, b, c, d) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used
      to store the computed loss curves; b is a dictionary mapping each
      poe to the ID of the :class:`openquake.engine.db.models.LossMap`
      used to store the loss maps; c and d are the IDs of the mean and
      quantile loss curve containers
    :param int lrem_steps_per_interval:
      Steps per interval used to compute the Loss Ratio Exceedance matrix
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool hazard_montecarlo_p:
      (meaningful only if curve statistics are computed) whether or not
      the hazard calculation is Monte Carlo based
    """

    asset_outputs = OrderedDict()

    calculator = api.Classical(
        vulnerability_function, lrem_steps_per_interval)

    for hazard_output_id, hazard_data in hazard.items():
        # the second item of the tuple is the weight of the hazard (at
        # this moment we are not interested in it)
        hazard_getter, _ = hazard_data

        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids) = (
             output_containers[hazard_output_id])

        with logs.tracing('getting hazard'):
            assets, hazard_curves, missings = hazard_getter()

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs[hazard_output_id] = calculator(hazard_curves)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, loss_ratio_curve in enumerate(
                        asset_outputs[hazard_output_id]):

                    asset = assets[i]

                    # Write Loss Curves
                    general.write_loss_curve(
                        loss_curve_id, asset, loss_ratio_curve)

                    # Then conditional loss maps
                    for poe in conditional_loss_poes:
                        general.write_loss_map_data(
                            loss_map_ids[poe], asset,
                            scientific.conditional_loss_ratio(
                                loss_ratio_curve, poe))

    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with transaction.commit_on_success(using='reslt_writer'):
                # transpose so that each row collects, for one asset,
                # the curves computed across all hazard outputs
                loss_ratio_curve_matrix = list(zip(*asset_outputs.values()))
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        loss_ratio_curve_matrix[i],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="support")

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings))
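
scientific.conditional_loss_ratio above reads, off a loss ratio curve,
the loss ratio that corresponds to a given probability of exceedance.
A hedged sketch of that interpolation (an assumption about the
library's behaviour, written against plain arrays rather than the
engine's curve objects):

import numpy

def conditional_loss_ratio_sketch(loss_ratios, poes, poe):
    # poes decrease as loss ratios grow, so both sequences are
    # reversed to give numpy.interp the ascending x values it expects
    return numpy.interp(poe, poes[::-1], loss_ratios[::-1])

# e.g. the loss ratio exceeded with 10% probability
conditional_loss_ratio_sketch(
    numpy.array([0.0, 0.2, 0.6, 1.0]),
    numpy.array([0.9, 0.5, 0.12, 0.01]),
    0.1)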
Example #4
def event_based(job_id, hazard,
                seed, vulnerability_function,
                output_containers,
                conditional_loss_poes, insured_losses,
                time_span, tses,
                loss_curve_resolution, asset_correlation,
                hazard_montecarlo_p):
    """
    Celery task for the event based risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmf_collection') to a tuple where the first element is an
      instance of
      :class:`..hazard_getters.GroundMotionValuesGetter`,
      and the second element is the corresponding weight.
    :param seed:
      the seed used to initialize the rng
    :param vulnerability_function:
      the vulnerability function used to compute the loss ratios
    :param dict output_containers: a dictionary mapping hazard Output
      ID to a list (a, b, c, d, e, f) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used
      to store the computed loss curves; b is a dictionary mapping each
      poe to the ID of the :class:`openquake.engine.db.models.LossMap`
      output container used to store the computed loss maps; c and d are
      the IDs of the mean and quantile loss curve containers; e is the
      same as a but for insured losses; f is the ID of the
      :class:`openquake.engine.db.models.AggregateLossCurve` output
      container used to store the computed aggregate loss curve
    :param conditional_loss_poes:
      the poes taken into account to compute the loss maps
    :param bool insured_losses: True if insured losses should be computed
    :param time_span: the time span considered
    :param tses: the time representative of the stochastic event set
    :param loss_curve_resolution: the curve resolution, i.e. the
      number of points defining the loss curves
    :param float asset_correlation: a number in the range [0, 1]
      representing the correlation between the generated loss ratios
    :param bool hazard_montecarlo_p:
      (meaningful only if curve statistics are computed) whether or not
      the hazard calculation is Monte Carlo based
    """

    loss_ratio_curves = OrderedDict()
    event_loss_table = dict()

    for hazard_output_id, hazard_data in hazard.items():
        hazard_getter, _ = hazard_data

        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids,
         insured_curve_id, aggregate_loss_curve_id) = (
             output_containers[hazard_output_id])

        # FIXME(lp). We should not pass the exact same seed for
        # different hazard outputs
        calculator = api.ProbabilisticEventBased(
            vulnerability_function,
            curve_resolution=loss_curve_resolution,
            time_span=time_span,
            tses=tses,
            seed=seed,
            correlation=asset_correlation)

        with logs.tracing('getting input data from db'):
            assets, gmvs_ruptures, missings = hazard_getter()
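
        # Illustrative note (an assumption inferred from the unpacking
        # below): the getter is expected to return one (gmvs,
        # rupture_ids) pair per asset, e.g.
        #
        #   gmvs_ruptures = [([0.1, 0.3], [11, 12]),   # asset 1
        #                    ([0.2, 0.5], [11, 12])]   # asset 2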

        if len(assets):
            gmvs_ruptures = numpy.array(gmvs_ruptures)
            ground_motion_values = gmvs_ruptures[:, 0]
            rupture_id_matrix = gmvs_ruptures[:, 1]
        else:
            # we rely on the fact that the hazard getters in this task
            # either all return some results or all return an empty
            # result set
            logs.LOG.info("Exit from task as no asset could be processed")
            base.signal_task_complete(
                job_id=job_id,
                event_loss_table=dict(),
                num_items=len(missings))
            return

        with logs.tracing('computing risk'):
            loss_ratio_matrix, loss_ratio_curves[hazard_output_id] = (
                calculator(ground_motion_values))

        with logs.tracing('writing results'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, loss_ratio_curve in enumerate(
                        loss_ratio_curves[hazard_output_id]):
                    asset = assets[i]

                    # loss curves
                    general.write_loss_curve(
                        loss_curve_id, asset, loss_ratio_curve)

                    # loss maps
                    for poe in conditional_loss_poes:
                        general.write_loss_map_data(
                            loss_map_ids[poe], asset,
                            scientific.conditional_loss_ratio(
                                loss_ratio_curve, poe))

                    # insured losses
                    if insured_losses:
                        insured_loss_curve = scientific.event_based(
                            scientific.insured_losses(
                                loss_ratio_matrix[i],
                                asset.value,
                                asset.deductible,
                                asset.ins_limit),
                            tses,
                            time_span,
                            loss_curve_resolution)

                        insured_loss_curve.abscissae = (
                            insured_loss_curve.abscissae / asset.value)
                        general.write_loss_curve(
                            insured_curve_id, asset, insured_loss_curve)
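
                        # Note (an assumption, not from the original
                        # source): scientific.insured_losses is expected
                        # to apply the insurance contract per event, so
                        # losses below the deductible count as zero and
                        # losses above ins_limit are capped at the limit;
                        # dividing the curve abscissae by asset.value
                        # then converts absolute losses back to ratios.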

                # update the event loss table of this task
                for i, asset in enumerate(assets):
                    for j, rupture_id in enumerate(rupture_id_matrix[i]):
                        loss = loss_ratio_matrix[i][j] * asset.value
                        event_loss_table[rupture_id] = (
                            event_loss_table.get(rupture_id, 0) + loss)

                # update the aggregate losses
                aggregate_losses = sum(
                    loss_ratio_matrix[i] * asset.value
                    for i, asset in enumerate(assets))
                general.update_aggregate_losses(
                    aggregate_loss_curve_id, aggregate_losses)

    # compute mean and quantile loss curves if multiple hazard
    # realizations are computed
    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                # transpose so that each row collects, for one asset,
                # the curves computed across all hazard outputs
                loss_ratio_curve_matrix = list(
                    zip(*loss_ratio_curves.values()))

                # here we rely on the fact that assets do not change
                # across different logic tree realizations (the hazard
                # grid does not change, so the hazard getters always
                # return the same assets)
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        loss_ratio_curve_matrix[i],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="image")

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings),
                              event_loss_table=event_loss_table)
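
Each task returns its partial event_loss_table keyed by rupture ID, so
the controller presumably merges the tables by summing per-rupture
losses. A minimal sketch of such a merge (the helper name is
hypothetical, not part of the engine):

def merge_event_loss_tables(partial_tables):
    """Sum the per-rupture losses coming from different tasks."""
    merged = {}
    for table in partial_tables:
        for rupture_id, loss in table.items():
            merged[rupture_id] = merged.get(rupture_id, 0) + loss
    return merged

merge_event_loss_tables([{1: 10.0, 2: 4.0}, {2: 1.5}])
# => {1: 10.0, 2: 5.5}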
Example #5
def classical(job_id, assets, hazard_getter_name, hazard,
              vulnerability_function,
              output_containers,
              lrem_steps_per_interval, conditional_loss_poes,
              hazard_montecarlo_p):
    """
    Celery task for the classical risk calculator.

    Instantiates risklib calculators, computes losses for the given
    assets and stores the results to db in a single transaction.

    :param int job_id:
      ID of the currently running job
    :param assets:
      iterator over :class:`openquake.engine.db.models.ExposureData` to take
      into account
    :param str hazard_getter_name: name of a class defined in the
      :mod:`openquake.engine.calculators.risk.hazard_getters` module, to
      be instantiated to get the hazard curves
    :param dict hazard:
      A dictionary mapping hazard Output ID to a tuple
      (HazardCurve ID, weight)
    :param vulnerability_function:
      the vulnerability function used to compute the loss ratios
    :param dict output_containers: A dictionary mapping hazard
      Output ID to a tuple (a, b, c, d) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used
      to store the computed loss curves; b is a dictionary mapping each
      poe to the ID of the :class:`openquake.engine.db.models.LossMap`
      used to store the loss maps; c and d are the IDs of the mean and
      quantile loss curve containers
    :param int lrem_steps_per_interval:
      Steps per interval used to compute the Loss Ratio Exceedance matrix
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool hazard_montecarlo_p:
      (meaningful only if curve statistics are computed) whether or not
      the hazard calculation is Monte Carlo based
    """

    asset_outputs = OrderedDict()

    for hazard_output_id, hazard_data in hazard.items():
        hazard_id, _ = hazard_data
        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids) = (
             output_containers[hazard_output_id])

        hazard_getter = general.hazard_getter(hazard_getter_name, hazard_id)

        calculator = api.Classical(
            vulnerability_function, lrem_steps_per_interval)

        # if we need to compute the loss maps, we add the proper risk
        # aggregator
        if conditional_loss_poes:
            calculator = api.ConditionalLosses(
                conditional_loss_poes, calculator)

        with logs.tracing('getting hazard'):
            hazard_curves = [hazard_getter(asset.site) for asset in assets]

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs[hazard_output_id] = calculator(assets, hazard_curves)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(
                        asset_outputs[hazard_output_id]):
                    general.write_loss_curve(
                        loss_curve_id, assets[i], asset_output)

                    if asset_output.conditional_losses:
                        general.write_loss_map(
                            loss_map_ids, assets[i], asset_output)

    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        [asset_output[i].loss_ratio_curve
                         for asset_output in asset_outputs.values()],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="support")

    base.signal_task_complete(job_id=job_id, num_items=len(assets))
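
general.hazard_getter resolves hazard_getter_name against the
hazard_getters module and instantiates the resulting class. A plausible
sketch of that lookup (an assumption about its implementation, with the
function name made up for illustration):

def hazard_getter_sketch(hazard_getters_module, name, *args):
    # look the getter class up by name, then instantiate it with the
    # remaining arguments (e.g. a HazardCurve ID, plus an IMT for the
    # ground motion variant)
    getter_class = getattr(hazard_getters_module, name)
    return getter_class(*args)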