Example #1
def event_based(job_id, units, containers, params):
    """
    Celery task for the event based risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param list units:
      A list of :class:`openquake.risklib.workflows.CalculationUnit` instances
    :param containers:
      An instance of :class:`..writers.OutputDict` containing
      output container instances (e.g. a LossCurve)
    :param params:
      An instance of :class:`..base.CalcParams` used to compute
      derived outputs
    """

    def profile(name):
        return EnginePerformanceMonitor(
            name, job_id, event_based, tracing=True)

    # Do the job in other functions, such that they can be unit tested
    # without the celery machinery
    event_loss_tables = dict()

    with db.transaction.commit_on_success(using='reslt_writer'):
        for unit in units:
            event_loss_tables[unit.loss_type] = do_event_based(
                unit,
                containers.with_args(loss_type=unit.loss_type),
                params, profile)
    num_items = base.get_num_items(units)
    signal_task_complete(job_id=job_id,
                         num_items=num_items,
                         event_loss_tables=event_loss_tables)
Example #2
def scenario_damage(job_id, units, containers, params):
    """
    Celery task for the scenario damage risk calculator.

    :param int job_id:
      ID of the currently running job
    :param list units:
      A list containing exactly one
      :class:`openquake.risklib.workflows.CalculationUnit` instance
    :param containers:
      An instance of :class:`..writers.OutputDict`; expected to be
      empty for this calculator (see the assertion below)
    :param params:
      An instance of :class:`..base.CalcParams` used to compute
      derived outputs
    """
    def profile(name):
        return EnginePerformanceMonitor(name,
                                        job_id,
                                        scenario_damage,
                                        tracing=True)

    # in scenario damage calculation we have only ONE calculation unit
    unit = units[0]

    # and NO containers
    assert len(containers) == 0

    with db.transaction.commit_on_success(using='reslt_writer'):
        fractions, taxonomy = do_scenario_damage(unit, params, profile)

    num_items = base.get_num_items(units)
    signal_task_complete(job_id=job_id,
                         num_items=num_items,
                         fractions=fractions,
                         taxonomy=taxonomy)
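base.get_num_items is called by most tasks on this page but is not shown. Given that num_items drives progress tracking on the control node, a plausible sketch is the following; the unit.getter.assets attribute is an assumption, not a documented API:

def get_num_items(units):
    # Sketch only: count the items processed by this task. `units` may
    # be a list of CalculationUnit instances or a dict keyed by loss
    # type; all loss types work on the same assets, so count one only.
    if isinstance(units, dict):
        units = units.values()[0]
    return sum(len(unit.getter.assets) for unit in units)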
Example #3
def scenario(job_id, units, containers, params):
    """
    Celery task for the scenario risk calculator.

    :param int job_id:
      ID of the currently running job
    :param dict units:
      A dict of :class:`..base.CalculationUnit` instances keyed by
      loss type string
    :param containers:
      An instance of :class:`..writers.OutputDict` containing
      output container instances (in this case only `LossMap`)
    :param params:
      An instance of :class:`..base.CalcParams` used to compute
      derived outputs
    """
    def profile(name):
        return EnginePerformanceMonitor(name, job_id, scenario, tracing=True)

    agg = dict()
    insured = dict()
    with db.transaction.commit_on_success(using='reslt_writer'):
        for loss_type in units:
            # in scenario calculations there is only ONE calculation
            # unit per loss type
            unit = units[loss_type][0]
            agg[loss_type], insured[loss_type] = do_scenario(
                loss_type, unit, containers, params, profile)
    num_items = base.get_num_items(units)
    signal_task_complete(
        job_id=job_id, num_items=num_items,
        aggregate_losses=agg, insured_losses=insured)
Example #4
def classical(job_id, units, containers, params):
    """
    Celery task for the classical risk calculator.

    :param int job_id:
      ID of the currently running job
    :param dict units:
      A dict of :class:`..base.CalculationUnit` instances keyed by
      loss type string
    :param containers:
      An instance of :class:`..writers.OutputDict` containing
      output container instances (e.g. a LossCurve)
    :param params:
      An instance of :class:`..base.CalcParams` used to compute
      derived outputs
    """
    def profile(name):
        return EnginePerformanceMonitor(name, job_id, classical, tracing=True)

    # Do the job in other functions, such that they can be unit tested
    # without the celery machinery
    with transaction.commit_on_success(using='reslt_writer'):
        for loss_type in units:
            do_classical(
                loss_type, units[loss_type], containers, params, profile)
    num_items = base.get_num_items(units)
    signal_task_complete(job_id=job_id, num_items=num_items)
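The comment above explains that the real work is delegated to do_classical so it can be unit tested without Celery. A hedged skeleton of such a helper, mainly to show how the profile factory is meant to be used; unit.getter, unit.workflow and containers.write are assumed names:

def do_classical(loss_type, units, containers, params, profile):
    # Sketch only: EnginePerformanceMonitor objects returned by
    # profile(name) are assumed to act as context managers that time
    # and trace the enclosed block.
    for unit in units:
        with profile('getting hazard'):
            assets, hazard_curves = unit.getter()
        with profile('computing risk'):
            outputs = unit.workflow(hazard_curves)
        with profile('saving risk outputs'):
            containers.write(assets, outputs, loss_type=loss_type)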
Example #5
def scenario_damage(job_id, assets, hazard_getter, hazard,
                    taxonomy, fragility_model, fragility_functions,
                    output_containers, imt):
    """
    Celery task for the scenario damage risk calculator.

    :param job_id: the id of the current
    :class:`openquake.engine.db.models.OqJob`
    :param assets: the list of :class:`openquake.risklib.scientific.Asset`
    instances considered
    :param hazard_getter: the name of a hazard getter to be used
    :param hazard: the hazard output dictionary
    :param taxonomy: the taxonomy being considered
    :param fragility_model: a
    :class:`openquake.risklib.models.input.FragilityModel` object
    :param fragility_functions: a
    :class:`openquake.risklib.models.input.FragilityFunctionSeq` object
    :param output_containers: a dictionary {hazard_id: output_id}
    of output_type "dmg_dist_per_asset"
    :param imt: the Intensity Measure Type of the ground motion field
    """
    calculator = api.ScenarioDamage(fragility_model, fragility_functions)
    for hazard_id in hazard:
        getter = general.hazard_getter(hazard_getter, hazard_id, imt)
        outputs = calculator(assets, [getter(a.site) for a in assets])
        with logs.tracing('save statistics per site'), \
                db.transaction.commit_on_success(using='reslt_writer'):
            rc_id = models.OqJob.objects.get(id=job_id).risk_calculation.id
            for output in outputs:
                save_dist_per_asset(output.fractions, rc_id, output.asset)

    # send aggregate fractions to the controller, the hook will collect them
    aggfractions = sum(o.fractions for o in outputs)
    base.signal_task_complete(job_id=job_id, num_items=len(assets),
                              fractions=aggfractions, taxonomy=taxonomy)
Example #6
def event_based(job_id, units, containers, params):
    """
    Celery task for the event based risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param list units:
      A list of :class:`openquake.risklib.workflows.CalculationUnit` instances
    :param containers:
      An instance of :class:`..writers.OutputDict` containing
      output container instances (e.g. a LossCurve)
    :param params:
      An instance of :class:`..base.CalcParams` used to compute
      derived outputs
    """
    def profile(name):
        return EnginePerformanceMonitor(name,
                                        job_id,
                                        event_based,
                                        tracing=True)

    # Do the job in other functions, such that they can be unit tested
    # without the celery machinery
    event_loss_tables = dict()

    with db.transaction.commit_on_success(using='reslt_writer'):
        for unit in units:
            event_loss_tables[unit.loss_type] = do_event_based(
                unit, containers.with_args(loss_type=unit.loss_type), params,
                profile)
    num_items = base.get_num_items(units)
    signal_task_complete(job_id=job_id,
                         num_items=num_items,
                         event_loss_tables=event_loss_tables)
Example #7
def event_based_bcr(job_id, units, containers, params):
    """
    Celery task for the BCR risk calculator based on the event based
    calculator.

    Instantiates risklib calculators, computes bcr
    and stores results to db in a single transaction.

    :param int job_id:
      ID of the currently running job
    :param dict units:
      A dict of :class:`..base.CalculationUnit` instances keyed by
      loss type string
    :param containers:
      An instance of :class:`..writers.OutputDict` containing
      output container instances (in this case only `BCRDistribution`)
    :param params:
      An instance of :class:`..base.CalcParams` used to compute
      derived outputs
    """

    def profile(name):
        return EnginePerformanceMonitor(
            name, job_id, event_based_bcr, tracing=True)

    # Do the job in other functions, such that it can be unit tested
    # without the celery machinery
    with transaction.commit_on_success(using="reslt_writer"):
        for loss_type in units:
            do_event_based_bcr(loss_type, units[loss_type], containers,
                               params, profile)
    num_items = base.get_num_items(units)
    signal_task_complete(job_id=job_id, num_items=num_items)
Example #8
def scenario(job_id, units, containers, _params):
    """
    Celery task for the scenario risk calculator.

    :param int job_id:
      ID of the currently running job
    :param list units:
      A list of :class:`openquake.risklib.workflows.CalculationUnit` instances
    :param containers:
      An instance of :class:`..writers.OutputDict` containing
      output container instances (in this case only `LossMap`)
    :param _params:
      An instance of :class:`..base.CalcParams`; unused by this
      calculator
    """
    def profile(name):
        return EnginePerformanceMonitor(name, job_id, scenario, tracing=True)

    agg = dict()
    insured = dict()
    with db.transaction.commit_on_success(using='reslt_writer'):
        for unit in units:
            agg[unit.loss_type], insured[unit.loss_type] = do_scenario(
                unit,
                containers.with_args(loss_type=unit.loss_type,
                                     output_type="loss_map"), profile)
    num_items = base.get_num_items(units)
    signal_task_complete(job_id=job_id,
                         num_items=num_items,
                         aggregate_losses=agg,
                         insured_losses=insured)
Example #9
def scenario(job_id, units, containers, _params):
    """
    Celery task for the scenario risk calculator.

    :param int job_id:
      ID of the currently running job
    :param list units:
      A list of :class:`openquake.risklib.workflows.CalculationUnit` instances
    :param containers:
      An instance of :class:`..writers.OutputDict` containing
      output container instances (in this case only `LossMap`)
    :param _params:
      An instance of :class:`..base.CalcParams`; unused by this
      calculator
    """
    def profile(name):
        return EnginePerformanceMonitor(name, job_id, scenario, tracing=True)

    agg = dict()
    insured = dict()
    with db.transaction.commit_on_success(using='reslt_writer'):
        for unit in units:
            agg[unit.loss_type], insured[unit.loss_type] = do_scenario(
                unit,
                containers.with_args(
                    loss_type=unit.loss_type,
                    output_type="loss_map"),
                profile)
    num_items = base.get_num_items(units)
    signal_task_complete(
        job_id=job_id, num_items=num_items,
        aggregate_losses=agg, insured_losses=insured)
Example #10
def scenario_damage(job_id, units, containers, params):
    """
    Celery task for the scenario damage risk calculator.

    :param int job_id:
      ID of the currently running job
    :param list units:
      A list containing exactly one
      :class:`openquake.risklib.workflows.CalculationUnit` instance
    :param containers:
      An instance of :class:`..writers.OutputDict`; expected to be
      empty for this calculator (see the assertion below)
    :param params:
      An instance of :class:`..base.CalcParams` used to compute
      derived outputs
    """
    def profile(name):
        return EnginePerformanceMonitor(
            name, job_id, scenario_damage, tracing=True)

    # in scenario damage calculation we have only ONE calculation unit
    unit = units[0]

    # and NO containers
    assert len(containers) == 0

    with db.transaction.commit_on_success(using='reslt_writer'):
        fractions, taxonomy = do_scenario_damage(unit, params, profile)

    num_items = base.get_num_items(units)
    signal_task_complete(
        job_id=job_id, num_items=num_items,
        fractions=fractions, taxonomy=taxonomy)
Example #11
def classical_bcr(job_id, hazard, vulnerability_function,
                  vulnerability_function_retrofitted,
                  output_containers, lrem_steps_per_interval,
                  asset_life_expectancy, interest_rate):
    """
    Celery task for the BCR risk calculator based on the classical
    calculator.

    Instantiates risklib calculators, computes BCR and stores the
    results to db in a single transaction.

    :param int job_id:
      ID of the currently running job
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'hazard_curve') to a tuple where the first element is an instance of
      :class:`..hazard_getters.HazardCurveGetter`, and the second
      element is the corresponding weight.
    :param output_containers: A dictionary mapping hazard Output ID to
      a tuple with only the ID of the
      :class:`openquake.engine.db.models.BCRDistribution` output container
      used to store the computed BCR distribution
    :param int lrem_steps_per_interval:
      Steps per interval used to compute the Loss Ratio Exceedance matrix
    :param float interest_rate:
      The interest rate used in the Cost Benefit Analysis
    :param float asset_life_expectancy:
      The life expectancy used for every asset
    """

    calc_original = api.Classical(
        vulnerability_function, lrem_steps_per_interval)
    calc_retrofitted = api.Classical(
        vulnerability_function_retrofitted, lrem_steps_per_interval)

    for hazard_output_id, hazard_data in hazard.items():
        hazard_getter, _ = hazard_data
        (bcr_distribution_id,) = output_containers[hazard_output_id]

        calculator = api.BCR(
            calc_original,
            calc_retrofitted,
            interest_rate,
            asset_life_expectancy)

        with logs.tracing('getting hazard'):
            assets, hazard_curves, missings = hazard_getter()

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs = calculator(assets, hazard_curves)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(asset_outputs):
                    general.write_bcr_distribution(
                        bcr_distribution_id, assets[i], asset_output)
    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings))
Example #12
def gmfs(job_id, sites, rupture_id, gmfcoll_id, task_seed, realizations):
    """
    A celery task wrapper function around :func:`compute_gmfs`.
    See :func:`compute_gmfs` for parameter definitions.
    """
    numpy.random.seed(task_seed)
    compute_gmfs(job_id, sites, rupture_id, gmfcoll_id, realizations)
    base.signal_task_complete(job_id=job_id, num_items=len(sites))
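Seeding NumPy with a per-task seed is what makes the sampling reproducible no matter which worker picks up the task. A quick self-contained illustration:

import numpy

numpy.random.seed(1234)
first = numpy.random.random(3)
numpy.random.seed(1234)
second = numpy.random.random(3)
assert (first == second).all()  # same seed, identical draws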
Example #13
def classical_bcr(job_id, assets, hazard_getter_name, hazard,
                  vulnerability_function, vulnerability_function_retrofitted,
                  output_containers, lrem_steps_per_interval,
                  asset_life_expectancy, interest_rate):
    """
    Celery task for the BCR risk calculator based on the classical
    calculator.

    Instantiates risklib calculators, computes BCR and stores the
    results to db in a single transaction.

    :param int job_id:
      ID of the currently running job
    :param assets:
      list of Assets to take into account
    :param str hazard_getter_name: class name of a class defined in the
      :mod:`openquake.engine.calculators.risk.hazard_getters`
      to be instantiated to get the hazard curves
    :param dict hazard:
      A dictionary mapping hazard Output ID to HazardCurve ID
    :param output_containers: A dictionary mapping hazard Output ID to
      a tuple with only the ID of the
      :class:`openquake.engine.db.models.BCRDistribution` output container
      used to store the computed BCR distribution
    :param int lrem_steps_per_interval:
      Steps per interval used to compute the Loss Ratio Exceedance matrix
    :param float interest_rate:
      The interest rate used in the Cost Benefit Analysis
    :param float asset_life_expectancy:
      The life expectancy used for every asset
    """

    for hazard_output_id, hazard_data in hazard.items():
        hazard_id, _ = hazard_data
        (bcr_distribution_id,) = output_containers[hazard_output_id]

        hazard_getter = general.hazard_getter(hazard_getter_name, hazard_id)

        calculator = api.BCR(
            api.Classical(vulnerability_function, lrem_steps_per_interval),
            api.Classical(vulnerability_function_retrofitted,
                          lrem_steps_per_interval),
            interest_rate,
            asset_life_expectancy)

        with logs.tracing('getting hazard'):
            hazard_curves = [hazard_getter(asset.site) for asset in assets]

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs = calculator(assets, hazard_curves)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(asset_outputs):
                    general.write_bcr_distribution(
                        bcr_distribution_id, assets[i], asset_output)
    base.signal_task_complete(job_id=job_id, num_items=len(assets))
Example #14
def hazard_curves(job_id, src_ids, lt_rlz_id):
    """
    A celery task wrapper function around :func:`compute_hazard_curves`.
    See :func:`compute_hazard_curves` for parameter definitions.
    """
    logs.LOG.debug('> starting task: job_id=%s, lt_realization_id=%s'
                   % (job_id, lt_rlz_id))

    compute_hazard_curves(job_id, src_ids, lt_rlz_id)
    # Last thing, signal back the control node to indicate the completion of
    # task. The control node needs this to manage the task distribution and
    # keep track of progress.
    logs.LOG.debug('< task complete, signalling completion')
    base.signal_task_complete(job_id=job_id, num_items=len(src_ids))
Example #15
def scenario_damage(job_id, hazard,
                    taxonomy, fragility_functions,
                    _output_containers):
    """
    Celery task for the scenario damage risk calculator.

    :param job_id: the id of the current
    :class:`openquake.engine.db.models.OqJob`
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmfscenario') to a tuple where the first element is an instance of
      :class:`..hazard_getters.GroundMotionScenarioGetter`, and the second
      element is the corresponding weight.
    :param taxonomy: the taxonomy being considered
    :param fragility_functions: a
    :class:`openquake.risklib.models.input.FragilityFunctionSequence` object
    :param _output_containers: a dictionary {hazard_id: output_id}
    of output_type "dmg_dist_per_asset"
    """
    calculator = api.ScenarioDamage(fragility_functions)

    # Scenario Damage works only on one hazard
    hazard_getter = hazard.values()[0][0]

    assets, ground_motion_values, missings = hazard_getter()

    if not len(assets):
        logs.LOG.warn("Exit from task as no asset could be processed")
        base.signal_task_complete(
            job_id=job_id, fractions=None,
            num_items=len(missings), taxonomy=taxonomy)
        return

    fraction_matrix = calculator(ground_motion_values)

    with logs.tracing('save statistics per site'), \
            db.transaction.commit_on_success(using='reslt_writer'):
        rc_id = models.OqJob.objects.get(id=job_id).risk_calculation.id
        for i, asset in enumerate(assets):
            save_dist_per_asset(
                fraction_matrix[i] * asset.number_of_units, rc_id, asset)

    # send aggregate fractions to the controller, the hook will collect them
    aggfractions = sum(fraction_matrix[i] * asset.number_of_units
                       for i, asset in enumerate(assets))
    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings),
                              fractions=aggfractions, taxonomy=taxonomy)
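The fractions payload sent here is a numpy matrix weighted by asset.number_of_units; the hook mentioned in the comment presumably accumulates these matrices per taxonomy on the controller side. A minimal sketch of that accumulation, with all names assumed:

totals = {}  # taxonomy -> aggregated damage fractions

def collect_fractions(taxonomy, fractions):
    # Sketch only: fractions is None when the task had no assets
    if fractions is None:
        return
    if taxonomy in totals:
        totals[taxonomy] = totals[taxonomy] + fractions
    else:
        totals[taxonomy] = fractions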
Example #16
def hazard_curves(job_id, src_ids, lt_rlz_id, ltp):
    """
    A celery task wrapper function around :func:`compute_hazard_curves`.
    See :func:`compute_hazard_curves` for parameter definitions.

    :param ltp:
        a :class:`openquake.engine.input.LogicTreeProcessor` instance
    """
    logs.LOG.debug('> starting task: job_id=%s, lt_realization_id=%s' %
                   (job_id, lt_rlz_id))

    compute_hazard_curves(job_id, src_ids, lt_rlz_id, ltp)
    # Last thing, signal back the control node to indicate the completion of
    # task. The control node needs this to manage the task distribution and
    # keep track of progress.
    logs.LOG.debug('< task complete, signalling completion')
    base.signal_task_complete(job_id=job_id, num_items=len(src_ids))
Example #17
File: core.py Project: 4x/oq-engine
def scenario(job_id, hazard, seed, vulnerability_function, output_containers,
             asset_correlation):
    """
    Celery task for the scenario risk calculator.

    :param job_id: the id of the current
    :class:`openquake.engine.db.models.OqJob`
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmfscenario') to a tuple where the first element is an instance of
      :class:`..hazard_getters.GroundMotionScenarioGetter2`, and the second
      element is the corresponding weight.
    :param seed: the seed used to initialize the rng
    :param output_containers: a dictionary {hazard_id: output_id}
        where output id represents the id of the loss map
    :param asset_correlation: asset correlation coefficient
    """

    calc = api.Scenario(vulnerability_function, seed, asset_correlation)

    hazard_getter = hazard.values()[0][0]

    assets, ground_motion_values, missings = hazard_getter()

    loss_ratio_matrix = calc(ground_motion_values)

    # Risk output container id
    outputs_id = output_containers.values()[0][0]

    with db.transaction.commit_on_success(using='reslt_writer'):
        for i, asset in enumerate(assets):
            general.write_loss_map_data(
                outputs_id, asset,
                loss_ratio_matrix[i].mean(),
                std_dev=loss_ratio_matrix[i].std(ddof=1))

    aggregate_losses = sum(loss_ratio_matrix[i] * asset.value
                           for i, asset in enumerate(assets))

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings),
                              aggregate_losses=aggregate_losses)
Example #18
def disagg_task(job_id, block, lt_rlz_id, ltp, calc_type):
    """
    Task wrapper around core hazard curve/disaggregation computation functions.

    :param int job_id:
        ID of the currently running job.
    :param block:
        A sequence of work items for this task to process. In the case of
        hazard curve computation, this is a sequence of source IDs. In the case
        of disaggregation, this is a list of
        :class:`openquake.hazardlib.site.Site` objects.

        For more info, see
        :func:`openquake.engine.calculators.hazard.classical.core.\
compute_hazard_curves`
        if ``calc_type`` is 'hazard_curve' and :func:`compute_disagg` if
        ``calc_type`` is 'disagg'.
    :param lt_rlz_id:
        ID of the :class:`openquake.engine.db.models.LtRealization` for this
        part of the computation.
    :param ltp:
        a :class:`openquake.engine.input.LogicTreeProcessor` instance
    :param calc_type:
        'hazard_curve' or 'disagg'. This indicates the calculation phase:
        first we must compute all of the hazard curves, then we can
        compute the disaggregation histograms.
    """
    if calc_type == 'hazard_curve':
        classical.compute_hazard_curves(job_id, block, lt_rlz_id, ltp)
    elif calc_type == 'disagg':
        compute_disagg(job_id, block, lt_rlz_id, ltp)
    else:
        msg = ('Invalid calculation type "%s";'
               ' expected "hazard_curve" or "disagg"')
        msg %= calc_type
        raise RuntimeError(msg)

    base.signal_task_complete(job_id=job_id,
                              num_items=len(block),
                              calc_type=calc_type)
Example #19
    def test_signal_task_complete(self):
        job_id = 7
        num_sources = 10

        def test_callback(body, message):
            self.assertEqual(dict(job_id=job_id, num_items=num_sources), body)
            message.ack()

        exchange, conn_args = base.exchange_and_conn_args()
        routing_key = base.ROUTING_KEY_FMT % dict(job_id=job_id)
        task_signal_queue = kombu.Queue('tasks.job.%s' % job_id,
                                        exchange=exchange,
                                        routing_key=routing_key,
                                        durable=False,
                                        auto_delete=True)

        with kombu.BrokerConnection(**conn_args) as conn:
            task_signal_queue(conn.channel()).declare()
            with conn.Consumer(task_signal_queue, callbacks=[test_callback]):

                # send the signal:
                base.signal_task_complete(job_id=job_id, num_items=num_sources)
                conn.drain_events()
Example #20
File: core.py Project: 4x/oq-engine
def disagg_task(job_id, block, lt_rlz_id, calc_type):
    """
    Task wrapper around core hazard curve/disaggregation computation functions.

    :param int job_id:
        ID of the currently running job.
    :param block:
        A sequence of work items for this task to process. In the case of
        hazard curve computation, this is a sequence of source IDs. In the case
        of disaggregation, this is a list of
        :class:`openquake.hazardlib.site.Site` objects.

        For more info, see
        :func:`openquake.engine.calculators.hazard.classical.core.\
compute_hazard_curves`
        if ``calc_type`` is 'hazard_curve' and :func:`compute_disagg` if
        ``calc_type`` is 'disagg'.
    :param lt_rlz_id:
        ID of the :class:`openquake.engine.db.models.LtRealization` for this
        part of the computation.
    :param calc_type:
        'hazard_curve' or 'disagg'. This indicates the calculation phase:
        first we must compute all of the hazard curves, then we can
        compute the disaggregation histograms.
    """
    if calc_type == 'hazard_curve':
        classical.compute_hazard_curves(job_id, block, lt_rlz_id)
    elif calc_type == 'disagg':
        compute_disagg(job_id, block, lt_rlz_id)
    else:
        msg = ('Invalid calculation type "%s";'
               ' expected "hazard_curve" or "disagg"')
        msg %= calc_type
        raise RuntimeError(msg)

    base.signal_task_complete(
        job_id=job_id, num_items=len(block), calc_type=calc_type)
Example #21
def scenario(job_id, assets, hazard_getter_name, hazard,
        seed, vulnerability_function, output_containers,
        imt, asset_correlation):
    """
    Celery task for the scenario risk calculator.

    :param job_id: the id of the current
    :class:`openquake.engine.db.models.OqJob`
    :param assets: the list of :class:`openquake.risklib.scientific.Asset`
        instances considered
    :param hazard_getter_name: the name of a hazard getter to be used
    :param hazard: the hazard output dictionary
    :param seed: the seed used to initialize the rng
    :param output_containers: a dictionary {hazard_id: output_id}
        where output id represents the id of the loss map
    :param imt: the Intensity Measure Type of the ground motion field
    :param asset_correlation: asset correlation coefficient
    """

    calc = openquake.risklib.api.Scenario(vulnerability_function,
            seed, asset_correlation)

    hazard_getter = general.hazard_getter(hazard_getter_name,
                    hazard.keys()[0], imt)

    outputs = calc(assets, [hazard_getter(a.site) for a in assets])

    # Risk output container id
    outputs_id = output_containers.values()[0][0]

    with db.transaction.commit_on_success(using='reslt_writer'):
        for i, output in enumerate(outputs):
            general.write_loss_map_data(outputs_id, assets[i].asset_ref,
                value=output.mean, std_dev=output.standard_deviation,
                location=assets[i].site)

    base.signal_task_complete(job_id=job_id, num_items=len(assets))
Example #22
def gmfs(job_id, rupture_ids, output_id, task_seed, task_no, realizations):
    """
    A celery task wrapper function around :func:`compute_gmfs`.
    See :func:`compute_gmfs` for parameter definitions.

    :param task_seed:
        Value for seeding numpy/scipy in the computation of
        ground motion fields.
    :param realizations:
        Number of ground motion field realizations which are
        going to be created by the task.
    """

    logs.LOG.debug('> starting task: job_id=%s, task_no=%s'
                   % (job_id, task_no))

    numpy.random.seed(task_seed)
    compute_gmfs(job_id, rupture_ids, output_id, task_no, realizations)

    # Last thing, signal back the control node to indicate the completion of
    # task. The control node needs this to manage the task distribution and
    # keep track of progress.
    logs.LOG.debug('< task complete, signalling completion')
    base.signal_task_complete(job_id=job_id, num_items=realizations)
Example #23
    def test_signal_task_complete(self):
        job_id = 7
        num_sources = 10

        def test_callback(body, message):
            self.assertEqual(dict(job_id=job_id, num_items=num_sources),
                body)
            message.ack()

        exchange, conn_args = base.exchange_and_conn_args()
        routing_key = base.ROUTING_KEY_FMT % dict(job_id=job_id)
        task_signal_queue = kombu.Queue(
            'tasks.job.%s' % job_id, exchange=exchange,
            routing_key=routing_key, durable=False, auto_delete=True)

        with kombu.BrokerConnection(**conn_args) as conn:
            task_signal_queue(conn.channel()).declare()
            with conn.Consumer(task_signal_queue,
                callbacks=[test_callback]):

                # send the signal:
                base.signal_task_complete(
                    job_id=job_id, num_items=num_sources)
                conn.drain_events()
Example #24
def ses_and_gmfs(job_id, src_ids, lt_rlz_id, task_seed, result_grp_ordinal):
    """
    Celery task for the stochastic event set calculator.

    Samples logic trees and calls the stochastic event set calculator.

    Once stochastic event sets are calculated, results will be saved to the
    database. See :class:`openquake.engine.db.models.SESCollection`.

    Optionally (specified in the job configuration using the
    `ground_motion_fields` parameter), GMFs can be computed from each rupture
    in each stochastic event set. GMFs are also saved to the database.

    Once all of this work is complete, a signal will be sent via AMQP to let
    the control node know that the work is done. (If there is any work left
    to be dispatched, this signal will indicate to the control node that more
    work can be enqueued.)

    :param int job_id:
        ID of the currently running job.
    :param src_ids:
        List of ids of parsed source models from which we will generate
        stochastic event sets/ruptures.
    :param lt_rlz_id:
        Id of logic tree realization model to calculate for.
    :param int task_seed:
        Value for seeding numpy/scipy in the computation of stochastic event
        sets and ground motion fields.
    :param int result_grp_ordinal:
        The result group in which the calculation results will be placed.
        This ID basically corresponds to the sequence number of the task,
        in the context of the entire calculation.
    """
    logs.LOG.debug(('> starting `stochastic_event_sets` task: job_id=%s, '
                    'lt_realization_id=%s') % (job_id, lt_rlz_id))
    numpy.random.seed(task_seed)

    hc = models.HazardCalculation.objects.get(oqjob=job_id)

    cmplt_lt_ses = None
    if hc.complete_logic_tree_ses:
        cmplt_lt_ses = models.SES.objects.get(
            ses_collection__output__oq_job=job_id,
            complete_logic_tree_ses=True)

    if hc.ground_motion_fields:
        # For ground motion field calculation, we need the points of interest
        # for the calculation.
        points_to_compute = hc.points_to_compute()

        imts = [haz_general.imt_to_hazardlib(x)
                for x in hc.intensity_measure_types]

        correl_model = None
        if hc.ground_motion_correlation_model is not None:
            correl_model = haz_general.get_correl_model(hc)

    lt_rlz = models.LtRealization.objects.get(id=lt_rlz_id)
    ltp = logictree.LogicTreeProcessor(hc.id)

    apply_uncertainties = ltp.parse_source_model_logictree_path(
            lt_rlz.sm_lt_path)
    gsims = ltp.parse_gmpe_logictree_path(lt_rlz.gsim_lt_path)

    sources = list(haz_general.gen_sources(
        src_ids, apply_uncertainties, hc.rupture_mesh_spacing,
        hc.width_of_mfd_bin, hc.area_source_discretization))

    # Compute stochastic event sets
    # For each rupture generated, we can optionally calculate a GMF
    for ses_rlz_n in xrange(1, hc.ses_per_logic_tree_path + 1):
        logs.LOG.debug('> computing stochastic event set %s of %s'
                       % (ses_rlz_n, hc.ses_per_logic_tree_path))

        # This is the container for all ruptures for this stochastic event set
        # (specified by `ordinal` and the logic tree realization).
        # NOTE: Many tasks can contribute ruptures to this SES.
        ses = models.SES.objects.get(
            ses_collection__lt_realization=lt_rlz, ordinal=ses_rlz_n)

        sources_sites = ((src, hc.site_collection) for src in sources)
        ssd_filter = filters.source_site_distance_filter(hc.maximum_distance)
        # Get the filtered sources, ignore the site collection:
        filtered_sources = (src for src, _ in ssd_filter(sources_sites))
        # Calculate stochastic event sets:
        logs.LOG.debug('> computing stochastic event sets')
        if hc.ground_motion_fields:
            gmf_cache = _create_gmf_cache(len(points_to_compute), imts)

            logs.LOG.debug('> computing also ground motion fields')
            # This will be the "container" for all computed ground motion field
            # results for this stochastic event set.
            gmf_set = models.GmfSet.objects.get(
                gmf_collection__lt_realization=lt_rlz, ses_ordinal=ses_rlz_n)

        ses_poissonian = stochastic.stochastic_event_set_poissonian(
            filtered_sources, hc.investigation_time)

        logs.LOG.debug('> looping over ruptures')
        rupture_ordinal = 0
        for rupture in ses_poissonian:
            rupture_ordinal += 1

            # Prepare and save SES ruptures to the db:
            logs.LOG.debug('> saving SES rupture to DB')
            _save_ses_rupture(
                ses, rupture, cmplt_lt_ses, result_grp_ordinal,
                rupture_ordinal)
            logs.LOG.debug('> done saving SES rupture to DB')

            # Compute ground motion fields (if requested)
            logs.LOG.debug('compute ground motion fields?  %s'
                           % hc.ground_motion_fields)
            if hc.ground_motion_fields:
                # Compute and save ground motion fields

                gmf_calc_kwargs = {
                    'rupture': rupture,
                    'sites': hc.site_collection,
                    'imts': imts,
                    'gsim': gsims[rupture.tectonic_region_type],
                    'truncation_level': hc.truncation_level,
                    'realizations': DEFAULT_GMF_REALIZATIONS,
                    'correlation_model': correl_model,
                    'rupture_site_filter':
                        filters.rupture_site_distance_filter(
                            hc.maximum_distance),
                }
                logs.LOG.debug('> computing ground motion fields')
                gmf_dict = gmf_calc.ground_motion_fields(**gmf_calc_kwargs)
                logs.LOG.debug('< done computing ground motion fields')

                # update the gmf cache:
                for k, v in gmf_dict.iteritems():
                    gmf_cache[k] = numpy.append(
                        gmf_cache[k], v, axis=1)

        logs.LOG.debug('< Done looping over ruptures')
        logs.LOG.debug('%s ruptures computed for SES realization %s of %s'
                       % (rupture_ordinal, ses_rlz_n,
                          hc.ses_per_logic_tree_path))
        logs.LOG.debug('< done computing stochastic event set %s of %s'
                       % (ses_rlz_n, hc.ses_per_logic_tree_path))

        if hc.ground_motion_fields:
            # save the GMFs to the DB
            logs.LOG.debug('> saving GMF results to DB')
            _save_gmfs(
                gmf_set, gmf_cache, points_to_compute, result_grp_ordinal)
            logs.LOG.debug('< done saving GMF results to DB')

    logs.LOG.debug('< task complete, signalling completion')
    base.signal_task_complete(job_id=job_id, num_items=len(src_ids))
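_create_gmf_cache is not shown, but since the cache is later grown with numpy.append(gmf_cache[k], v, axis=1), it presumably starts as one array per IMT with a row per point of interest and zero columns. A sketch under that assumption:

import numpy

def _create_gmf_cache(n_points, imts):
    # Sketch only: an (n_points, 0) array per IMT, so that each
    # computed GMF realization can be appended as a new column.
    return dict((imt, numpy.empty((n_points, 0))) for imt in imts)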
Example #25
def event_based_bcr(job_id, assets, hazard_getter_name, hazard, seed,
                    vulnerability_function, vulnerability_function_retrofitted,
                    output_containers, imt, time_span, tses,
                    loss_curve_resolution, asset_correlation,
                    asset_life_expectancy, interest_rate):
    """
    Celery task for the BCR risk calculator based on the event based
    calculator.

    Instantiates risklib calculators, computes bcr
    and stores results to db in a single transaction.

    :param int job_id:
        ID of the currently running job.
    :param assets:
        list of assets to compute.
    :param str hazard_getter_name: class name of a class defined in the
      :mod:`openquake.engine.calculators.risk.hazard_getters` to be
      instantiated to get the hazard curves
    :param dict hazard:
      A dictionary mapping hazard Output ID to GmfCollection ID
    :param output_containers: A dictionary mapping hazard Output ID to
      a tuple with only the ID of the
      :class:`openquake.engine.db.models.BCRDistribution` output container
      used to store the computed bcr distribution
    :param float imt:
        Intensity Measure Type to take into account.
    :param float time_span:
        Time Span of the hazard calculation.
    :param float tses:
        Time of the Stochastic Event Set.
    :param int loss_curve_resolution:
        Resolution of the computed loss curves (number of points).
    :param int seed:
        Seed used to generate random values.
    :param float asset_correlation:
        asset correlation (0 uncorrelated, 1 perfectly correlated).
    :param float interest_rate:
        The interest rate used in the Cost Benefit Analysis.
    :param float asset_life_expectancy:
        The life expectancy used for every asset.
    """

    for hazard_output_id, hazard_data in hazard.items():
        hazard_id, _ = hazard_data
        (bcr_distribution_id,) = output_containers[hazard_output_id]

        hazard_getter = general.hazard_getter(
            hazard_getter_name, hazard_id, imt)

        calculator = api.ProbabilisticEventBased(
            vulnerability_function, curve_resolution=loss_curve_resolution,
            time_span=time_span, tses=tses,
            seed=seed, correlation=asset_correlation)

        calculator_retrofitted = api.ProbabilisticEventBased(
            vulnerability_function_retrofitted,
            curve_resolution=loss_curve_resolution,
            time_span=time_span, tses=tses,
            seed=seed, correlation=asset_correlation)

        bcr_calculator = api.BCR(calculator, calculator_retrofitted,
                                 interest_rate, asset_life_expectancy)

        with logs.tracing('getting hazard'):
            ground_motion_fields = [hazard_getter(asset.site)
                                    for asset in assets]

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs = bcr_calculator(assets, ground_motion_fields)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(asset_outputs):
                    general.write_bcr_distribution(
                        bcr_distribution_id, assets[i], asset_output)

    base.signal_task_complete(job_id=job_id, num_items=len(assets))
Example #26
def scenario(job_id, hazard, seed, vulnerability_function, output_containers,
             insured_losses, asset_correlation):
    """
    Celery task for the scenario risk calculator.

    :param job_id: the id of the current
    :class:`openquake.engine.db.models.OqJob`
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmfscenario') to a tuple where the first element is an instance of
      :class:`..hazard_getters.GroundMotionScenarioGetter`, and the second
      element is the corresponding weight.
    :param seed: the seed used to initialize the rng
    :param output_containers: a dictionary {hazard_id: output_id}
        where output id represents the id of the loss map
    :param bool insured_losses: True if also insured losses should be computed
    :param asset_correlation: asset correlation coefficient
    """

    calc = api.Scenario(vulnerability_function, seed, asset_correlation)

    hazard_getter = hazard.values()[0][0]

    with EnginePerformanceMonitor('hazard_getter', job_id, scenario):
        assets, ground_motion_values, missings = hazard_getter()

    if not len(assets):
        logs.LOG.info("Exit from task as no asset could be processed")
        base.signal_task_complete(job_id=job_id,
                                  aggregate_losses=None,
                                  insured_aggregate_losses=None,
                                  num_items=len(missings))
        return

    with logs.tracing('computing risk'):
        loss_ratio_matrix = calc(ground_motion_values)

        if insured_losses:
            insured_loss_matrix = [
                scientific.insured_losses(
                    loss_ratio_matrix[i], asset.value,
                    asset.deductible, asset.ins_limit)
                for i, asset in enumerate(assets)]

    # There is only one output container list as there is no support
    # for hazard logic tree
    output_containers = output_containers.values()[0]

    loss_map_id = output_containers[0]

    if insured_losses:
        insured_loss_map_id = output_containers[1]

    with db.transaction.commit_on_success(using='reslt_writer'):
        for i, asset in enumerate(assets):
            general.write_loss_map_data(
                loss_map_id, asset,
                loss_ratio_matrix[i].mean(),
                std_dev=loss_ratio_matrix[i].std(ddof=1))

            if insured_losses:
                general.write_loss_map_data(
                    insured_loss_map_id, asset,
                    insured_loss_matrix[i].mean() / asset.value,
                    std_dev=insured_loss_matrix[i].std(ddof=1) / asset.value)

    aggregate_losses = sum(loss_ratio_matrix[i] * asset.value
                           for i, asset in enumerate(assets))

    if insured_losses:
        insured_aggregate_losses = (
            numpy.array(insured_loss_matrix).transpose().sum(axis=1))
    else:
        insured_aggregate_losses = "Not computed"

    base.signal_task_complete(
        job_id=job_id,
        num_items=len(assets) + len(missings),
        aggregate_losses=aggregate_losses,
        insured_aggregate_losses=insured_aggregate_losses)
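Conceptually, the insured-loss computation delegated to scientific.insured_losses bounds each absolute loss by the policy deductible and insurance limit. A hedged illustration of one common convention (not the library's actual code):

import numpy

def insured_losses_sketch(loss_ratios, value, deductible, ins_limit):
    # Sketch only: losses below the deductible are not reimbursed,
    # losses above the insurance limit are capped at the limit.
    losses = numpy.asarray(loss_ratios) * value
    return numpy.where(losses < deductible, 0.0,
                       numpy.minimum(losses, ins_limit))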
Example #27
def classical(job_id, assets, hazard_getter_name, hazard,
              vulnerability_function,
              output_containers,
              lrem_steps_per_interval, conditional_loss_poes,
              hazard_montecarlo_p):
    """
    Celery task for the classical risk calculator.

    Instantiates risklib calculators, computes losses for the given
    assets and stores the results to db in a single transaction.

    :param int job_id:
      ID of the currently running job
    :param assets:
      iterator over :class:`openquake.engine.db.models.ExposureData` to take
      into account
    :param str hazard_getter_name: class name of a class defined in the
      :mod:`openquake.engine.calculators.risk.hazard_getters` to be
      instantiated to get the hazard curves
    :param dict hazard:
      A dictionary mapping hazard Output ID to HazardCurve ID
    :param dict output_containers: A dictionary mapping hazard
      Output ID to a tuple (a, b) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used to
      store the computed loss curves and b is a dictionary that maps poe to ID
      of the :class:`openquake.engine.db.models.LossMap` used to store
      the loss maps
    :param int lrem_steps_per_interval:
      Steps per interval used to compute the Loss Ratio Exceedance matrix
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool hazard_montecarlo_p:
      (meaningful only if curve statistics are computed) Whether or not
      the hazard calculation is Monte Carlo based
    """

    asset_outputs = OrderedDict()

    for hazard_output_id, hazard_data in hazard.items():
        hazard_id, _ = hazard_data
        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids) = (
             output_containers[hazard_output_id])

        hazard_getter = general.hazard_getter(hazard_getter_name, hazard_id)

        calculator = api.Classical(
            vulnerability_function, lrem_steps_per_interval)

        # if we need to compute the loss maps, we add the proper risk
        # aggregator
        if conditional_loss_poes:
            calculator = api.ConditionalLosses(
                conditional_loss_poes, calculator)

        with logs.tracing('getting hazard'):
            hazard_curves = [hazard_getter(asset.site) for asset in assets]

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs[hazard_output_id] = calculator(assets, hazard_curves)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(
                        asset_outputs[hazard_output_id]):
                    general.write_loss_curve(
                        loss_curve_id, assets[i], asset_output)

                    if asset_output.conditional_losses:
                        general.write_loss_map(
                            loss_map_ids, assets[i], asset_output)

    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        [asset_output[i].loss_ratio_curve
                         for asset_output in asset_outputs.values()],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="support")

    base.signal_task_complete(job_id=job_id, num_items=len(assets))
Example #28
def classical(job_id, hazard, vulnerability_function, output_containers,
              lrem_steps_per_interval, conditional_loss_poes,
              hazard_montecarlo_p):
    """
    Celery task for the classical risk calculator.

    Instantiates risklib calculators, computes losses for the given
    assets and stores the results to db in a single transaction.

    :param int job_id:
      ID of the currently running job
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'hazard_curve') to a tuple where the first element is an instance of
      :class:`..hazard_getters.HazardCurveGetter`, and the second element is
      the corresponding weight.
    :param dict output_containers: A dictionary mapping hazard
      Output ID to a tuple (a, b) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used to
      store the computed loss curves and b is a dictionary that maps poe to ID
      of the :class:`openquake.engine.db.models.LossMap` used to store
      the loss maps
    :param int lrem_steps_per_interval:
      Steps per interval used to compute the Loss Ratio Exceedance matrix
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool hazard_montecarlo_p:
      (meaningful only if curve statistics are computed) Whether or not
      the hazard calculation is Monte Carlo based
    """

    asset_outputs = OrderedDict()

    calculator = api.Classical(
        vulnerability_function, lrem_steps_per_interval)

    for hazard_output_id, hazard_data in hazard.items():
        # the second item of the tuple is the weight of the hazard (at
        # this moment we are not interested in it)
        hazard_getter, _ = hazard_data

        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids) = (
             output_containers[hazard_output_id])

        with logs.tracing('getting hazard'):
            assets, hazard_curves, missings = hazard_getter()

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs[hazard_output_id] = calculator(hazard_curves)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, loss_ratio_curve in enumerate(
                        asset_outputs[hazard_output_id]):

                    asset = assets[i]

                    # Write Loss Curves
                    general.write_loss_curve(
                        loss_curve_id, asset, loss_ratio_curve)

                    # Then conditional loss maps
                    for poe in conditional_loss_poes:
                        general.write_loss_map_data(
                            loss_map_ids[poe], asset,
                            scientific.conditional_loss_ratio(
                                loss_ratio_curve, poe))

    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with transaction.commit_on_success(using='reslt_writer'):
                loss_ratio_curve_matrix = asset_outputs.values()
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        loss_ratio_curve_matrix[i],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="support")

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings))
Example #29
def fn(job_id, units, *args):
    task(job_id, units, *args)
    num_items = get_num_items(units)
    base.signal_task_complete(job_id=job_id, num_items=num_items)
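This fragment is evidently the inner function of a task decorator: fn runs the wrapped task and then signals completion on its behalf. A hedged reconstruction of the enclosing factory, with the decorator name assumed:

def count_progress(task):
    # Sketch only: decorate a task so that completion is signalled
    # automatically, with num_items derived from the calculation units.
    def fn(job_id, units, *args):
        task(job_id, units, *args)
        num_items = get_num_items(units)
        base.signal_task_complete(job_id=job_id, num_items=num_items)
    return fn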
Example #30
def event_based(job_id, assets, hazard_getter_name, hazard,
                seed, vulnerability_function,
                output_containers,
                conditional_loss_poes, insured_losses,
                imt, time_span, tses,
                loss_curve_resolution, asset_correlation,
                hazard_montecarlo_p):
    """
    Celery task for the event based risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param assets: the list of :class:`openquake.risklib.scientific.Asset`
    instances considered
    :param str hazard_getter_name: class name of a class defined in the
      :mod:`openquake.engine.calculators.risk.hazard_getters` to be
      instantiated to get the hazard curves
    :param dict hazard:
      A dictionary mapping hazard Output ID to GmfCollection ID
    :param seed: the seed used to initialize the rng

    :param dict output_containers: a dictionary mapping hazard Output
      ID to a list (a, b, c, d) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used to
      store the computed loss curves; b is the dictionary poe->ID of
      the :class:`openquake.engine.db.models.LossMap` output container used
      to store the computed loss maps; c is the same as a but for
      insured losses; d is the ID of the
      :class:`openquake.engine.db.models.AggregateLossCurve` output container
      used to store the computed loss curves
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool insured_losses: True if insured losses should be computed
    :param str imt: the imt used to filter ground motion fields
    :param time_span: the time span considered
    :param tses: time of the stochastic event set
    :param loss_curve_resolution: the curve resolution, i.e. the
    number of points defining the loss curves
    :param float asset_correlation: a number ranging from 0 to 1
    representing the correlation between the generated loss ratios
    """

    asset_outputs = OrderedDict()
    for hazard_output_id, hazard_data in hazard.items():
        hazard_id, _ = hazard_data

        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids,
         insured_curve_id, aggregate_loss_curve_id) = (
             output_containers[hazard_output_id])

        hazard_getter = general.hazard_getter(
            hazard_getter_name, hazard_id, imt)

        calculator = api.ProbabilisticEventBased(
            vulnerability_function,
            curve_resolution=loss_curve_resolution,
            time_span=time_span,
            tses=tses,
            seed=seed,
            correlation=asset_correlation)

        if insured_losses:
            calculator = api.InsuredLosses(calculator)

        # if we need to compute the loss maps, we add the proper risk
        # aggregator
        if conditional_loss_poes:
            calculator = api.ConditionalLosses(
                conditional_loss_poes, calculator)

        with logs.tracing('getting hazard'):
            ground_motion_fields = [hazard_getter(asset.site)
                                    for asset in assets]

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs[hazard_output_id] = calculator(
                assets, ground_motion_fields)

        with logs.tracing('writing results'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(
                        asset_outputs[hazard_output_id]):
                    general.write_loss_curve(
                        loss_curve_id, assets[i], asset_output)

                    if asset_output.conditional_losses:
                        general.write_loss_map(
                            loss_map_ids, assets[i], asset_output)

                    if asset_output.insured_losses:
                        general.write_loss_curve(
                            insured_curve_id, assets[i], asset_output)
                losses = sum(asset_output.losses
                             for asset_output
                             in asset_outputs[hazard_output_id])
                general.update_aggregate_losses(
                    aggregate_loss_curve_id, losses)

    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        [asset_output[i].loss_ratio_curve
                         for asset_output in asset_outputs.values()],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="image")

    base.signal_task_complete(job_id=job_id, num_items=len(assets))
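
The (a, b, c, d, e, f) tuple described in the docstring is easier to read with concrete values. Below is a minimal sketch of the expected shape of output_containers; every ID is purely hypothetical:

# hypothetical illustration of the container mapping, not engine code
output_containers = {
    7: (                        # hazard Output ID
        101,                    # LossCurve container ID
        {0.01: 201, 0.1: 202},  # poe -> LossMap container ID
        301,                    # mean LossCurve container ID
        [401, 402],             # quantile LossCurve container IDs
        501,                    # insured LossCurve container ID
        601,                    # AggregateLossCurve container ID
    ),
}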
Example No. 31
def event_based_bcr(job_id, hazard, seed,
                    vulnerability_function, vulnerability_function_retrofitted,
                    output_containers, time_span, tses,
                    loss_curve_resolution, asset_correlation,
                    asset_life_expectancy, interest_rate):
    """
    Celery task for the BCR risk calculator based on the event based
    calculator.

    Instantiates risklib calculators, computes bcr
    and stores results to db in a single transaction.

    :param int job_id:
        ID of the currently running job.
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmf_collection') to a tuple where the first element is an
      instance of
      :class:`..hazard_getters.GroundMotionValuesGetter`
      and the second element is the corresponding weight.
    :param output_containers: A dictionary mapping hazard Output ID to
      a tuple with only the ID of the
      :class:`openquake.engine.db.models.BCRDistribution` output container
      used to store the computed bcr distribution
    :param float time_span:
        Time Span of the hazard calculation.
    :param float tses:
        Time of the Stochastic Event Set.
    :param int loss_curve_resolution:
        Resolution of the computed loss curves (number of points).
    :param int seed:
        Seed used to generate random values.
    :param float asset_correlation:
        asset correlation (0 uncorrelated, 1 perfectly correlated).
    :param float interest_rate:
        The interest rate used in the Cost Benefit Analysis.
    :param float asset_life_expectancy:
        The life expectancy used for every asset.
    """

    for hazard_output_id, hazard_data in hazard.items():
        hazard_getter, _ = hazard_data
        (bcr_distribution_id,) = output_containers[hazard_output_id]

        # FIXME(lp). We should not pass the exact same seed for
        # different hazard
        calc_original = api.ProbabilisticEventBased(
            vulnerability_function, curve_resolution=loss_curve_resolution,
            time_span=time_span, tses=tses,
            seed=seed, correlation=asset_correlation)

        calc_retrofitted = api.ProbabilisticEventBased(
            vulnerability_function_retrofitted,
            curve_resolution=loss_curve_resolution,
            time_span=time_span, tses=tses,
            seed=seed, correlation=asset_correlation)

        with logs.tracing('getting hazard'):
            assets, gmvs_ruptures, missings = hazard_getter()
            if len(assets):
                ground_motion_values = numpy.array(gmvs_ruptures)[:, 0]
            else:
                # we rely on the fact that the hazard getters in this
                # task either all return some results or all return an
                # empty result set
                logs.LOG.info("Exit from task as no asset could be processed")
                base.signal_task_complete(job_id=job_id,
                                          num_items=len(missings))
                return

        with logs.tracing('computing risk'):
            _, original_loss_curves = calc_original(ground_motion_values)
            _, retrofitted_loss_curves = calc_retrofitted(ground_motion_values)

            eal_original = [
                scientific.mean_loss(*original_loss_curves[i].xy)
                for i in range(len(assets))]

            eal_retrofitted = [
                scientific.mean_loss(*retrofitted_loss_curves[i].xy)
                for i in range(len(assets))]

            bcr_results = [
                scientific.bcr(
                    eal_original[i], eal_retrofitted[i],
                    interest_rate, asset_life_expectancy,
                    asset.value, asset.retrofitting_cost)
                for i, asset in enumerate(assets)]

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset in enumerate(assets):
                    general.write_bcr_distribution(
                        bcr_distribution_id, asset,
                        eal_original[i], eal_retrofitted[i], bcr_results[i])

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings))
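
The benefit-cost ratio combines the two expected annual losses computed above. The snippet below is a self-contained sketch of the standard formula a function like scientific.bcr is expected to implement (an assumption, not the risklib source): the discounted reduction in expected annual loss, divided by the retrofitting cost.

import math

# sketch of the standard BCR formula (assumed, not the risklib source)
def bcr_sketch(eal_original, eal_retrofitted, interest_rate,
               asset_life_expectancy, asset_value, retrofitting_cost):
    # benefit: reduction in expected annual loss, in monetary terms
    annual_benefit = (eal_original - eal_retrofitted) * asset_value
    # discount the annual benefit over the asset's remaining life
    discount = 1 - math.exp(-interest_rate * asset_life_expectancy)
    return annual_benefit * discount / (interest_rate * retrofitting_cost)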
Example No. 32
def fn(job_id, units, *args):
    # run the wrapped task, then signal completion with the number
    # of work items processed
    task(job_id, units, *args)
    num_items = get_num_items(units)
    base.signal_task_complete(job_id=job_id, num_items=num_items)
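
This fragment reads like the inner function of a wrapper that adds completion signalling to any risk task. A hypothetical, self-contained version of such a factory follows; the helper callables are passed in explicitly because their real homes are not shown in this fragment:

# hypothetical factory; `task`, `get_num_items` and
# `signal_task_complete` are supplied by the caller
def with_completion_signal(task, get_num_items, signal_task_complete):
    def fn(job_id, units, *args):
        task(job_id, units, *args)
        signal_task_complete(job_id=job_id,
                             num_items=get_num_items(units))
    return fn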
Example No. 33
def event_based(job_id, hazard,
                seed, vulnerability_function,
                output_containers,
                conditional_loss_poes, insured_losses,
                time_span, tses,
                loss_curve_resolution, asset_correlation,
                hazard_montecarlo_p):
    """
    Celery task for the event based risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmf_collection') to a tuple where the first element is an
      instance of
      :class:`..hazard_getters.GroundMotionValuesGetter`,
      and the second element is the corresponding weight.
    :param seed:
      the seed used to initialize the rng
    :param dict output_containers: a dictionary mapping hazard Output
      ID to a tuple (a, b, c, d, e, f) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used to
      store the computed loss curves; b is a dictionary poe -> ID of
      the :class:`openquake.engine.db.models.LossMap` output container used
      to store the computed loss maps; c and d hold the IDs of the
      containers for the mean and quantile loss curves; e is the same
      as a but for insured losses; f is the ID of the
      :class:`openquake.engine.db.models.AggregateLossCurve` output
      container used to store the computed aggregate loss curve
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool insured_losses: True if insured losses should be computed
    :param time_span: the time span considered
    :param tses: time of the stochastic event set
    :param loss_curve_resolution: the curve resolution, i.e. the
      number of points defining the loss curves
    :param float asset_correlation: a number ranging from 0 to 1
      representing the correlation between the generated loss ratios
    """

    loss_ratio_curves = OrderedDict()
    event_loss_table = dict()

    for hazard_output_id, hazard_data in hazard.items():
        hazard_getter, _ = hazard_data

        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids,
         insured_curve_id, aggregate_loss_curve_id) = (
             output_containers[hazard_output_id])

        # FIXME(lp). We should not pass the exact same seed for
        # different hazard
        calculator = api.ProbabilisticEventBased(
            vulnerability_function,
            curve_resolution=loss_curve_resolution,
            time_span=time_span,
            tses=tses,
            seed=seed,
            correlation=asset_correlation)

        with logs.tracing('getting input data from db'):
            assets, gmvs_ruptures, missings = hazard_getter()

        if len(assets):
            ground_motion_values = numpy.array(gmvs_ruptures)[:, 0]
            rupture_id_matrix = numpy.array(gmvs_ruptures)[:, 1]
        else:
            # we rely on the fact that the hazard getters in this task
            # either all return some results or all return an empty
            # result set
            logs.LOG.info("Exit from task as no asset could be processed")
            base.signal_task_complete(
                job_id=job_id,
                event_loss_table=dict(),
                num_items=len(missings))
            return

        with logs.tracing('computing risk'):
            loss_ratio_matrix, loss_ratio_curves[hazard_output_id] = (
                calculator(ground_motion_values))

        with logs.tracing('writing results'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, loss_ratio_curve in enumerate(
                        loss_ratio_curves[hazard_output_id]):
                    asset = assets[i]

                    # loss curves
                    general.write_loss_curve(
                        loss_curve_id, asset, loss_ratio_curve)

                    # loss maps
                    for poe in conditional_loss_poes:
                        general.write_loss_map_data(
                            loss_map_ids[poe], asset,
                            scientific.conditional_loss_ratio(
                                loss_ratio_curve, poe))

                    # insured losses
                    if insured_losses:
                        insured_loss_curve = scientific.event_based(
                            scientific.insured_losses(
                                loss_ratio_matrix[i],
                                asset.value,
                                asset.deductible,
                                asset.ins_limit),
                            tses,
                            time_span,
                            loss_curve_resolution)

                        insured_loss_curve.abscissae = (
                            insured_loss_curve.abscissae / asset.value)
                        general.write_loss_curve(
                            insured_curve_id, asset, insured_loss_curve)

                # update the event loss table of this task
                for i, asset in enumerate(assets):
                    for j, rupture_id in enumerate(rupture_id_matrix[i]):
                        loss = loss_ratio_matrix[i][j] * asset.value
                        event_loss_table[rupture_id] = (
                            event_loss_table.get(rupture_id, 0) + loss)

                # update the aggregate losses
                aggregate_losses = sum(
                    loss_ratio_matrix[i] * asset.value
                    for i, asset in enumerate(assets))
                general.update_aggregate_losses(
                    aggregate_loss_curve_id, aggregate_losses)

    # compute mean and quantile loss curves if multiple hazard
    # realizations are computed
    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                # transpose: for each asset, collect its loss ratio
                # curves across the different hazard realizations
                loss_ratio_curve_matrix = zip(*loss_ratio_curves.values())

                # here we rely on the fact that assets do not change
                # across different logic tree realizations (the hazard
                # grid does not change, so the hazard getters always
                # return the same assets)
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        loss_ratio_curve_matrix[i],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="image")

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings),
                              event_loss_table=event_loss_table)
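
The event loss table bookkeeping in this task is a plain accumulation of per-event monetary losses keyed by rupture ID. A toy run with made-up numbers shows the mechanics:

# toy illustration with invented values (asset x event matrices)
loss_ratio_matrix = [[0.10, 0.02], [0.05, 0.00]]
rupture_id_matrix = [['rup1', 'rup2'], ['rup1', 'rup2']]
asset_values = [1000.0, 2000.0]

event_loss_table = dict()
for i, value in enumerate(asset_values):
    for j, rupture_id in enumerate(rupture_id_matrix[i]):
        loss = loss_ratio_matrix[i][j] * value
        event_loss_table[rupture_id] = (
            event_loss_table.get(rupture_id, 0) + loss)
# event_loss_table == {'rup1': 200.0, 'rup2': 20.0}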
Example No. 34
def event_based_bcr(job_id, hazard, seed,
                    vulnerability_function, vulnerability_function_retrofitted,
                    output_containers, time_span, tses,
                    loss_curve_resolution, asset_correlation,
                    asset_life_expectancy, interest_rate):
    """
    Celery task for the BCR risk calculator based on the event based
    calculator.

    Instantiates risklib calculators, computes bcr
    and stores results to db in a single transaction.

    :param int job_id:
        ID of the currently running job.
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmf_collection') to a tuple where the first element is an
      instance of
      :class:`..hazard_getters.GroundMotionValuesGetter`
      and the second element is the corresponding weight.
    :param output_containers: A dictionary mapping hazard Output ID to
      a tuple with only the ID of the
      :class:`openquake.engine.db.models.BCRDistribution` output container
      used to store the computed bcr distribution
    :param float time_span:
        Time Span of the hazard calculation.
    :param float tses:
        Time of the Stochastic Event Set.
    :param int loss_curve_resolution:
        Resolution of the computed loss curves (number of points).
    :param int seed:
        Seed used to generate random values.
    :param float asset_correlation:
        asset correlation (0 uncorrelated, 1 perfectly correlated).
    :param float interest_rate:
        The interest rate used in the Cost Benefit Analysis.
    :param float asset_life_expectancy:
        The life expectancy used for every asset.
    """

    for hazard_output_id, hazard_data in hazard.items():
        hazard_getter, _ = hazard_data
        (bcr_distribution_id,) = output_containers[hazard_output_id]

        # FIXME(lp). We should not pass the exact same seed for
        # different hazard
        calculator = api.ProbabilisticEventBased(
            vulnerability_function, curve_resolution=loss_curve_resolution,
            time_span=time_span, tses=tses,
            seed=seed, correlation=asset_correlation)

        calculator_retrofitted = api.ProbabilisticEventBased(
            vulnerability_function_retrofitted,
            curve_resolution=loss_curve_resolution,
            time_span=time_span, tses=tses,
            seed=seed, correlation=asset_correlation)

        bcr_calculator = api.BCR(calculator, calculator_retrofitted,
                                 interest_rate, asset_life_expectancy)

        with logs.tracing('getting hazard'):
            assets, ground_motion_fields, missings = hazard_getter()

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs = bcr_calculator(assets, ground_motion_fields)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(asset_outputs):
                    general.write_bcr_distribution(
                        bcr_distribution_id, assets[i], asset_output)

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings))
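
Here api.BCR hides the per-asset arithmetic that Example No. 31 spells out. A hypothetical sketch of what such a wrapper plausibly composes, reusing the scientific helpers seen in that example:

# hypothetical composition mirroring Example No. 31, not the api source
def bcr_outputs(calc_original, calc_retrofitted, assets,
                ground_motion_values, interest_rate,
                asset_life_expectancy):
    _, curves = calc_original(ground_motion_values)
    _, curves_retro = calc_retrofitted(ground_motion_values)
    for asset, c, cr in zip(assets, curves, curves_retro):
        eal = scientific.mean_loss(*c.xy)
        eal_retro = scientific.mean_loss(*cr.xy)
        yield scientific.bcr(eal, eal_retro, interest_rate,
                             asset_life_expectancy,
                             asset.value, asset.retrofitting_cost)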
Example No. 35
def event_based(job_id, hazard,
                seed, vulnerability_function,
                output_containers,
                conditional_loss_poes, insured_losses,
                time_span, tses,
                loss_curve_resolution, asset_correlation,
                hazard_montecarlo_p):
    """
    Celery task for the event based risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmf_collection') to a tuple where the first element is an
      instance of
      :class:`..hazard_getters.GroundMotionValuesGetter`,
      and the second element is the corresponding weight.
    :param seed:
      the seed used to initialize the rng
    :param dict output_containers: a dictionary mapping hazard Output
      ID to a tuple (a, b, c, d, e, f) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used to
      store the computed loss curves; b is a dictionary poe -> ID of
      the :class:`openquake.engine.db.models.LossMap` output container used
      to store the computed loss maps; c and d hold the IDs of the
      containers for the mean and quantile loss curves; e is the same
      as a but for insured losses; f is the ID of the
      :class:`openquake.engine.db.models.AggregateLossCurve` output
      container used to store the computed aggregate loss curve
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool insured_losses: True if insured losses should be computed
    :param time_span: the time span considered
    :param tses: time of the stochastic event set
    :param loss_curve_resolution: the curve resolution, i.e. the
      number of points defining the loss curves
    :param float asset_correlation: a number ranging from 0 to 1
      representing the correlation between the generated loss ratios
    """

    asset_outputs = OrderedDict()
    for hazard_output_id, hazard_data in hazard.items():
        hazard_getter, _ = hazard_data

        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids,
         insured_curve_id, aggregate_loss_curve_id) = (
             output_containers[hazard_output_id])

        # FIXME(lp). We should not pass the exact same seed for
        # different hazard
        calculator = api.ProbabilisticEventBased(
            vulnerability_function,
            curve_resolution=loss_curve_resolution,
            time_span=time_span,
            tses=tses,
            seed=seed,
            correlation=asset_correlation)

        if insured_losses:
            calculator = api.InsuredLosses(calculator)

        # if we need to compute the loss maps, we add the proper risk
        # aggregator
        if conditional_loss_poes:
            calculator = api.ConditionalLosses(
                conditional_loss_poes, calculator)

        with logs.tracing('getting input data from db'):
            assets, ground_motion_values, missings = hazard_getter()

        with logs.tracing('computing risk'):
            asset_outputs[hazard_output_id] = calculator(
                assets, ground_motion_values)

        with logs.tracing('writing results'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(
                        asset_outputs[hazard_output_id]):
                    general.write_loss_curve(
                        loss_curve_id, assets[i], asset_output)

                    if asset_output.conditional_losses:
                        general.write_loss_map(
                            loss_map_ids, assets[i], asset_output)

                    if asset_output.insured_losses:
                        general.write_loss_curve(
                            insured_curve_id, assets[i], asset_output)
                losses = sum(asset_output.losses
                             for asset_output
                             in asset_outputs[hazard_output_id])
                general.update_aggregate_losses(
                    aggregate_loss_curve_id, losses)

    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        [asset_output[i].loss_ratio_curve
                         for asset_output in asset_outputs.values()],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="image")

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings))
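
The InsuredLosses and ConditionalLosses wrappers follow a calculator-decorator pattern: each wraps a base calculator and enriches its outputs. A minimal sketch of the pattern, with illustrative names that are not the risklib API:

# illustrative sketch of the calculator-decorator pattern used by
# api.InsuredLosses / api.ConditionalLosses; names and output fields
# are assumptions, not the risklib API
class ConditionalLossesSketch(object):
    def __init__(self, poes, calculator, conditional_loss_fn):
        self.poes = poes
        self.calculator = calculator
        self.conditional_loss_fn = conditional_loss_fn

    def __call__(self, assets, ground_motion_values):
        outputs = self.calculator(assets, ground_motion_values)
        for output in outputs:
            # enrich each base output with losses at the given poes
            output.conditional_losses = dict(
                (poe, self.conditional_loss_fn(output, poe))
                for poe in self.poes)
        return outputs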