Code example #1 (score: 0)
File: core.py — Project: gvallarelli/oq-engine
def scenario_damage(job_id, assets, hazard_getter, hazard,
                    taxonomy, fragility_model, fragility_functions,
                    output_containers, imt):
    """
    Celery task for the scenario damage risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param assets: the list of :class:`openquake.risklib.scientific.Asset`
        instances considered
    :param hazard_getter: the name of an hazard getter to be used
    :param hazard: the hazard output dictionary
    :param taxonomy: the taxonomy being considered
    :param fragility_model: a
        :class:`openquake.risklib.models.input.FragilityModel` object
    :param fragility_functions: a
        :class:`openquake.risklib.models.input.FragilityFunctionSeq` object
    :param output_containers: a dictionary {hazard_id: output_id}
        of output_type "dmg_dist_per_asset"
    :param imt: the Intensity Measure Type of the ground motion field
    """
    calculator = api.ScenarioDamage(fragility_model, fragility_functions)
    for hazard_id in hazard:
        # Bind the getter instance to a fresh name: the original code
        # rebound ``hazard_getter`` itself, clobbering the getter *name*
        # with a getter *instance* and breaking every iteration after
        # the first one.
        getter = general.hazard_getter(hazard_getter, hazard_id, imt)
        outputs = calculator(assets, [getter(a.site) for a in assets])
        with logs.tracing('save statistics per site'), \
                db.transaction.commit_on_success(using='reslt_writer'):
            rc_id = models.OqJob.objects.get(id=job_id).risk_calculation.id
            for output in outputs:
                save_dist_per_asset(output.fractions, rc_id, output.asset)

    # send aggregate fractions to the controller, the hook will collect them
    # NOTE(review): ``outputs`` holds only the *last* hazard output's
    # results and is undefined if ``hazard`` is empty — presumably a
    # single hazard output is expected here; confirm against callers.
    aggfractions = sum(o.fractions for o in outputs)
    base.signal_task_complete(job_id=job_id, num_items=len(assets),
                              fractions=aggfractions, taxonomy=taxonomy)
Code example #2 (score: 0)
File: core.py — Project: gvallarelli/oq-engine
def classical_bcr(job_id, assets, hazard_getter_name, hazard,
                  vulnerability_function, vulnerability_function_retrofitted,
                  output_containers, lrem_steps_per_interval,
                  asset_life_expectancy, interest_rate):
    """
    Celery task computing Benefit-Cost Ratios with the classical
    risk calculator.

    For each hazard output, a BCR calculator is built out of two
    classical calculators (current and retrofitted vulnerability), the
    BCR distribution is computed for every asset and the results are
    written to the database in a single transaction.

    :param int job_id:
      ID of the currently running job
    :param assets:
      list of Assets to take into account
    :param str hazard_getter_name: class name of a class defined in the
      :mod:`openquake.engine.calculators.risk.hazard_getters`
      to be instantiated to get the hazard curves
    :param dict hazard:
      A dictionary mapping hazard Output ID to HazardCurve ID
    :param output_containers: A dictionary mapping hazard Output ID to
      a tuple with only the ID of the
      :class:`openquake.engine.db.models.BCRDistribution` output container
      used to store the computed bcr distribution
    :param int lrem_steps_per_interval:
      Steps per interval used to compute the Loss Ratio Exceedance matrix
    :param float interest_rate:
      The interest rate used in the Cost Benefit Analysis
    :param float asset_life_expectancy:
      The life expectancy used for every asset
    """
    for output_id, data in hazard.items():
        curve_id = data[0]
        bcr_distribution_id, = output_containers[output_id]

        getter = general.hazard_getter(hazard_getter_name, curve_id)

        bcr_calc = api.BCR(
            api.Classical(vulnerability_function, lrem_steps_per_interval),
            api.Classical(vulnerability_function_retrofitted,
                          lrem_steps_per_interval),
            interest_rate,
            asset_life_expectancy)

        with logs.tracing('getting hazard'):
            curves = [getter(asset.site) for asset in assets]

        with logs.tracing('computing risk over %d assets' % len(assets)):
            results = bcr_calc(assets, curves)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, result in enumerate(results):
                    general.write_bcr_distribution(
                        bcr_distribution_id, assets[i], result)
    base.signal_task_complete(job_id=job_id, num_items=len(assets))
Code example #3 (score: 0)
File: core.py — Project: gvallarelli/oq-engine
def scenario(job_id, assets, hazard_getter_name, hazard,
             seed, vulnerability_function, output_containers,
             imt, asset_correlation):
    """
    Celery task for the scenario risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param assets: the list of :class:`openquake.risklib.scientific.Asset`
        instances considered
    :param hazard_getter_name: the name of an hazard getter to be used
    :param hazard: the hazard output dictionary
    :param seed: the seed used to initialize the rng
    :param vulnerability_function: the vulnerability function used to
        compute the loss ratios
    :param output_containers: a dictionary {hazard_id: output_id}
        where output id represents the id of the loss map
    :param imt: the Intensity Measure Type of the ground motion field
    :param asset_correlation: asset correlation coefficient
    """
    calc = openquake.risklib.api.Scenario(vulnerability_function,
                                          seed, asset_correlation)

    # ``next(iter(...))`` instead of ``.keys()[0]``: dict views are not
    # indexable on Python 3 and this form is equivalent on Python 2.
    # Only a single hazard output is expected for this calculator.
    hazard_getter = general.hazard_getter(hazard_getter_name,
                                          next(iter(hazard)), imt)

    outputs = calc(assets, [hazard_getter(a.site) for a in assets])

    # Risk output container id (the loss map); same single-entry
    # assumption as above, written in a Python 2/3 compatible way.
    outputs_id = next(iter(output_containers.values()))[0]

    with db.transaction.commit_on_success(using='reslt_writer'):
        for i, output in enumerate(outputs):
            general.write_loss_map_data(outputs_id, assets[i].asset_ref,
                                        value=output.mean,
                                        std_dev=output.standard_deviation,
                                        location=assets[i].site)

    base.signal_task_complete(job_id=job_id, num_items=len(assets))
Code example #4 (score: 0)
File: core.py — Project: gvallarelli/oq-engine
def event_based(job_id, assets, hazard_getter_name, hazard,
                seed, vulnerability_function,
                output_containers,
                conditional_loss_poes, insured_losses,
                imt, time_span, tses,
                loss_curve_resolution, asset_correlation,
                hazard_montecarlo_p):
    """
    Celery task for the event based risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param assets: the list of `:class:openquake.risklib.scientific.Asset`
        instances considered
    :param str hazard_getter_name: class name of a class defined in the
      :mod:`openquake.engine.calculators.risk.hazard_getters` to be
      instantiated to get the ground motion fields
    :param dict hazard:
      A dictionary mapping hazard Output ID to GmfCollection ID
    :param seed: the seed used to initialize the rng

    :param dict output_containers: a dictionary mapping hazard Output
      ID to a 6-tuple (loss_curve_id, loss_map_ids, mean_loss_curve_id,
      quantile_loss_curve_ids, insured_curve_id, aggregate_loss_curve_id)
      where loss_curve_id is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used to
      store the computed loss curves; loss_map_ids is the dictionary poe->ID
      of the :class:`openquake.engine.db.models.LossMap` output container used
      to store the computed loss maps; mean_loss_curve_id and
      quantile_loss_curve_ids are the containers for curve statistics;
      insured_curve_id is the container for insured loss curves;
      aggregate_loss_curve_id is the ID of the
      :class:`openquake.engine.db.models.AggregateLossCurve` output container
      used to store the aggregate loss curves
    :param conditional_loss_poes:
      The poes taken into accout to compute the loss maps
    :param bool insured_losses: True if insured losses should be computed
    :param str imt: the imt used to filter ground motion fields
    :param time_span: the time span considered
    :param tses: time of the stochastic event set
    :param loss_curve_resolution: the curve resolution, i.e. the
        number of points which defines the loss curves
    :param float asset_correlation: a number ranging from 0 to 1
        representing the correlation between the generated loss ratios
    :param bool hazard_montecarlo_p: whether or not the hazard
        calculation is montecarlo based (meaningful only when curve
        statistics are computed)
    """

    # outputs per hazard Output ID, in insertion order, so the
    # post-loop statistics can pair them with the hazard weights
    asset_outputs = OrderedDict()
    for hazard_output_id, hazard_data in hazard.items():
        hazard_id, _ = hazard_data

        # NOTE: mean_loss_curve_id and quantile_loss_curve_ids are also
        # read after this loop; the last iteration's values are used
        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids,
         insured_curve_id, aggregate_loss_curve_id) = (
             output_containers[hazard_output_id])

        hazard_getter = general.hazard_getter(
            hazard_getter_name, hazard_id, imt)

        calculator = api.ProbabilisticEventBased(
            vulnerability_function,
            curve_resolution=loss_curve_resolution,
            time_span=time_span,
            tses=tses,
            seed=seed,
            correlation=asset_correlation)

        # wrap the base calculator to also compute insured losses
        if insured_losses:
            calculator = api.InsuredLosses(calculator)

        # if we need to compute the loss maps, we add the proper risk
        # aggregator
        if conditional_loss_poes:
            calculator = api.ConditionalLosses(
                conditional_loss_poes, calculator)

        with logs.tracing('getting hazard'):
            ground_motion_fields = [hazard_getter(asset.site)
                                    for asset in assets]

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs[hazard_output_id] = calculator(
                assets, ground_motion_fields)

        # persist per-asset results in one transaction per hazard output
        with logs.tracing('writing results'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(
                        asset_outputs[hazard_output_id]):
                    general.write_loss_curve(
                        loss_curve_id, assets[i], asset_output)

                    if asset_output.conditional_losses:
                        general.write_loss_map(
                            loss_map_ids, assets[i], asset_output)

                    if asset_output.insured_losses:
                        general.write_loss_curve(
                            insured_curve_id, assets[i], asset_output)
                # aggregate losses over all assets for this hazard output
                losses = sum(asset_output.losses
                             for asset_output
                             in asset_outputs[hazard_output_id])
                general.update_aggregate_losses(
                    aggregate_loss_curve_id, losses)

    # curve statistics are only meaningful across multiple hazard outputs
    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        # each hazard_data tuple is (hazard_id, weight)
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        [asset_output[i].loss_ratio_curve
                         for asset_output in asset_outputs.values()],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="image")

    base.signal_task_complete(job_id=job_id, num_items=len(assets))
Code example #5 (score: 0)
File: core.py — Project: gvallarelli/oq-engine
def classical(job_id, assets, hazard_getter_name, hazard,
              vulnerability_function,
              output_containers,
              lrem_steps_per_interval, conditional_loss_poes,
              hazard_montecarlo_p):
    """
    Celery task for the classical risk calculator.

    Instantiates risklib calculators, computes losses for the given
    assets and stores the results to db in a single transaction.

    :param int job_id:
      ID of the currently running job
    :param assets:
      iterator over :class:`openquake.engine.db.models.ExposureData` to take
      into account
    :param str hazard_getter_name: class name of a class defined in the
      :mod:`openquake.engine.calculators.risk.hazard_getters` to be
      instantiated to get the hazard curves
    :param dict hazard:
      A dictionary mapping hazard Output ID to HazardCurve ID
    :param dict output_containers: A dictionary mapping hazard
      Output ID to a 4-tuple (loss_curve_id, loss_map_ids,
      mean_loss_curve_id, quantile_loss_curve_ids) where loss_curve_id
      is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used to
      store the computed loss curves, loss_map_ids is a dictionary that
      maps poe to ID of the
      :class:`openquake.engine.db.models.LossMap` used to store
      the loss maps, and mean_loss_curve_id / quantile_loss_curve_ids
      are the containers for curve statistics
    :param int lrem_steps_per_interval:
      Steps per interval used to compute the Loss Ratio Exceedance matrix
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool hazard_montecarlo_p:
     (meaningful only if curve statistics are computed). Whether or not
     the hazard calculation is montecarlo based
    """

    # outputs per hazard Output ID, in insertion order, so the
    # post-loop statistics can pair them with the hazard weights
    asset_outputs = OrderedDict()

    for hazard_output_id, hazard_data in hazard.items():
        hazard_id, _ = hazard_data
        # NOTE: mean_loss_curve_id and quantile_loss_curve_ids are also
        # read after this loop; the last iteration's values are used
        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids) = (
             output_containers[hazard_output_id])

        hazard_getter = general.hazard_getter(hazard_getter_name, hazard_id)

        calculator = api.Classical(
            vulnerability_function, lrem_steps_per_interval)

        # if we need to compute the loss maps, we add the proper risk
        # aggregator
        if conditional_loss_poes:
            calculator = api.ConditionalLosses(
                conditional_loss_poes, calculator)

        with logs.tracing('getting hazard'):
            hazard_curves = [hazard_getter(asset.site) for asset in assets]

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs[hazard_output_id] = calculator(assets, hazard_curves)

        # persist per-asset results in one transaction per hazard output
        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(
                        asset_outputs[hazard_output_id]):
                    general.write_loss_curve(
                        loss_curve_id, assets[i], asset_output)

                    if asset_output.conditional_losses:
                        general.write_loss_map(
                            loss_map_ids, assets[i], asset_output)

    # curve statistics are only meaningful across multiple hazard outputs
    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        # each hazard_data tuple is (hazard_id, weight)
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        [asset_output[i].loss_ratio_curve
                         for asset_output in asset_outputs.values()],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="support")

    base.signal_task_complete(job_id=job_id, num_items=len(assets))
Code example #6 (score: 0)
File: core.py — Project: gvallarelli/oq-engine
def event_based_bcr(job_id, assets, hazard_getter_name, hazard, seed,
                    vulnerability_function, vulnerability_function_retrofitted,
                    output_containers, imt, time_span, tses,
                    loss_curve_resolution, asset_correlation,
                    asset_life_expectancy, interest_rate):
    """
    Celery task computing Benefit-Cost Ratios with the event based
    risk calculator.

    For each hazard output, two probabilistic event based calculators
    (current and retrofitted vulnerability) are combined into a BCR
    calculator; the resulting distribution is computed over the ground
    motion fields of every asset and stored to the database in a single
    transaction.

    :param int job_id:
        ID of the currently running job.
    :param assets:
        list of assets to compute.
    :param str hazard_getter_name: class name of a class defined in the
      :mod:`openquake.engine.calculators.risk.hazard_getters` to be
      instantiated to get the hazard curves
    :param dict hazard:
      A dictionary mapping hazard Output ID to GmfCollection ID
    :param output_containers: A dictionary mapping hazard Output ID to
      a tuple with only the ID of the
      :class:`openquake.engine.db.models.BCRDistribution` output container
      used to store the computed bcr distribution
    :param float imt:
        Intensity Measure Type to take into account.
    :param float time_span:
        Time Span of the hazard calculation.
    :param float tses:
        Time of the Stochastic Event Set.
    :param int loss_curve_resolution:
        Resolution of the computed loss curves (number of points).
    :param int seed:
        Seed used to generate random values.
    :param float asset_correlation:
        asset correlation (0 uncorrelated, 1 perfectly correlated).
    :param float interest_rate:
        The interest rate used in the Cost Benefit Analysis.
    :param float asset_life_expectancy:
        The life expectancy used for every asset.
    """
    for output_id, data in hazard.items():
        gmf_collection_id = data[0]
        bcr_distribution_id, = output_containers[output_id]

        getter = general.hazard_getter(
            hazard_getter_name, gmf_collection_id, imt)

        # both event based calculators share the same settings; only
        # the vulnerability function differs
        settings = dict(curve_resolution=loss_curve_resolution,
                        time_span=time_span, tses=tses,
                        seed=seed, correlation=asset_correlation)
        bcr_calc = api.BCR(
            api.ProbabilisticEventBased(vulnerability_function, **settings),
            api.ProbabilisticEventBased(
                vulnerability_function_retrofitted, **settings),
            interest_rate, asset_life_expectancy)

        with logs.tracing('getting hazard'):
            gmfs = [getter(asset.site) for asset in assets]

        with logs.tracing('computing risk over %d assets' % len(assets)):
            results = bcr_calc(assets, gmfs)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, result in enumerate(results):
                    general.write_bcr_distribution(
                        bcr_distribution_id, assets[i], result)

    base.signal_task_complete(job_id=job_id, num_items=len(assets))