Code example #1
Score: 0
File: core.py — Project: 4x/oq-engine
def scenario(job_id, hazard, seed, vulnerability_function, output_containers,
             asset_correlation):
    """
    Celery task for the scenario risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmfscenario') to a tuple where the first element is an instance of
      :class:`..hazard_getters.GroundMotionScenarioGetter2`, and the second
      element is the corresponding weight.
    :param seed: the seed used to initialize the rng
    :param vulnerability_function: the vulnerability function used to
        compute the loss ratio matrix from the ground motion values
    :param output_containers: a dictionary {hazard_id: output_id}
        where output id represents the id of the loss map
    :param asset_correlation: asset correlation coefficient
    """

    calc = api.Scenario(vulnerability_function, seed, asset_correlation)

    # There is exactly one hazard output: the scenario calculator has no
    # hazard logic tree support.
    hazard_getter = hazard.values()[0][0]

    assets, ground_motion_values, missings = hazard_getter()

    # Guard against an empty result set: without it calc() would run on
    # empty input and sum() below would yield the integer 0 instead of a
    # loss array (same pattern as the sibling scenario task that also
    # computes insured losses).
    if not len(assets):
        base.signal_task_complete(job_id=job_id,
                                  num_items=len(missings),
                                  aggregate_losses=None)
        return

    loss_ratio_matrix = calc(ground_motion_values)

    # Risk output container id (single hazard output -> single container)
    outputs_id = output_containers.values()[0][0]

    with db.transaction.commit_on_success(using='reslt_writer'):
        for i, asset in enumerate(assets):
            general.write_loss_map_data(
                outputs_id, asset,
                loss_ratio_matrix[i].mean(),
                std_dev=loss_ratio_matrix[i].std(ddof=1))

    # Elementwise sum over assets of the value-scaled loss vectors
    aggregate_losses = sum(loss_ratio_matrix[i] * asset.value
                           for i, asset in enumerate(assets))

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings),
                              aggregate_losses=aggregate_losses)
Code example #2
Score: 0
File: core.py — Project: gvallarelli/oq-engine
def scenario(job_id, assets, hazard_getter_name, hazard,
             seed, vulnerability_function, output_containers,
             imt, asset_correlation):
    """
    Celery task for the scenario risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param assets: the list of :class:`openquake.risklib.scientific.Asset`
        instances considered
    :param hazard_getter_name: the name of an hazard getter to be used
    :param hazard: the hazard output dictionary
    :param seed: the seed used to initialize the rng
    :param vulnerability_function: the vulnerability function used by
        the scenario calculator
    :param output_containers: a dictionary {hazard_id: output_id}
        where output id represents the id of the loss map
    :param imt: the Intensity Measure Type of the ground motion field
    :param asset_correlation: asset correlation coefficient
    """

    scenario_calculator = openquake.risklib.api.Scenario(
        vulnerability_function, seed, asset_correlation)

    getter = general.hazard_getter(
        hazard_getter_name, hazard.keys()[0], imt)

    ground_motion_fields = [getter(asset.site) for asset in assets]
    results = scenario_calculator(assets, ground_motion_fields)

    # Single risk output container: no hazard logic tree support here
    container_id = output_containers.values()[0][0]

    with db.transaction.commit_on_success(using='reslt_writer'):
        for asset, result in zip(assets, results):
            general.write_loss_map_data(
                container_id, asset.asset_ref,
                value=result.mean, std_dev=result.standard_deviation,
                location=asset.site)

    base.signal_task_complete(job_id=job_id, num_items=len(assets))
Code example #3
Score: 0
File: core.py — Project: xpb/oq-engine
def event_based(job_id, hazard,
                seed, vulnerability_function,
                output_containers,
                conditional_loss_poes, insured_losses,
                time_span, tses,
                loss_curve_resolution, asset_correlation,
                hazard_montecarlo_p):
    """
    Celery task for the event based risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmf_collection') to a tuple where the first element is an
      instance of
      :class:`..hazard_getters.GroundMotionValuesGetter`,
      and the second element is the corresponding weight.
    :param seed:
      the seed used to initialize the rng
    :param vulnerability_function:
      the vulnerability function used to compute the loss ratios
    :param dict output_containers: a dictionary mapping hazard Output
      ID to a list (a, b, c, d) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used to
      store the computed loss curves; b is the dictionary poe->ID of
      the :class:`openquake.engine.db.models.LossMap` output container used
      to store the computed loss maps; c is the same as a but for
      insured losses; d is the ID of the
      :class:`openquake.engine.db.models.AggregateLossCurve` output container
      used to store the computed loss curves
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool insured_losses: True if insured losses should be computed
    :param time_span: the time span considered
    :param tses: time of the stochastic event set
    :param loss_curve_resolution: the curve resolution, i.e. the
    number of points which defines the loss curves
    :param float asset_correlation: a number ranging from 0 to 1
    representing the correlation between the generated loss ratios
    :param bool hazard_montecarlo_p:
    (meaningful only if curve statistics are computed) whether or not
    the hazard calculation is montecarlo based
    """

    loss_ratio_curves = OrderedDict()
    event_loss_table = dict()

    for hazard_output_id, hazard_data in hazard.items():
        hazard_getter, _ = hazard_data

        # NOTE(review): these names (and assets/missings below) are
        # re-bound on every iteration; the statistics block and the
        # final signal after the loop use the LAST iteration's values.
        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids,
         insured_curve_id, aggregate_loss_curve_id) = (
             output_containers[hazard_output_id])

        # FIXME(lp). We should not pass the exact same seed for
        # different hazard
        calculator = api.ProbabilisticEventBased(
            vulnerability_function,
            curve_resolution=loss_curve_resolution,
            time_span=time_span,
            tses=tses,
            seed=seed,
            correlation=asset_correlation)

        with logs.tracing('getting input data from db'):
            assets, gmvs_ruptures, missings = hazard_getter()

        if len(assets):
            # each row of gmvs_ruptures pairs the ground motion values
            # (column 0) with the originating rupture ids (column 1)
            ground_motion_values = numpy.array(gmvs_ruptures)[:, 0]
            rupture_id_matrix = numpy.array(gmvs_ruptures)[:, 1]
        else:
            # we rely on the fact that the hazard getters in this task
            # either all return some results or all return an empty
            # result set, so exiting on the first empty one is safe.
            logs.LOG.info("Exit from task as no asset could be processed")
            base.signal_task_complete(
                job_id=job_id,
                event_loss_table=dict(),
                num_items=len(missings))
            return

        with logs.tracing('computing risk'):
            loss_ratio_matrix, loss_ratio_curves[hazard_output_id] = (
                calculator(ground_motion_values))

        with logs.tracing('writing results'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, loss_ratio_curve in enumerate(
                        loss_ratio_curves[hazard_output_id]):
                    asset = assets[i]

                    # loss curves
                    general.write_loss_curve(
                        loss_curve_id, asset, loss_ratio_curve)

                    # loss maps
                    for poe in conditional_loss_poes:
                        general.write_loss_map_data(
                            loss_map_ids[poe], asset,
                            scientific.conditional_loss_ratio(
                                loss_ratio_curve, poe))

                    # insured losses
                    if insured_losses:
                        insured_loss_curve = scientific.event_based(
                            scientific.insured_losses(
                                loss_ratio_matrix[i],
                                asset.value,
                                asset.deductible,
                                asset.ins_limit),
                            tses,
                            time_span,
                            loss_curve_resolution)

                        # re-normalize abscissae to loss ratios
                        insured_loss_curve.abscissae = (
                            insured_loss_curve.abscissae / asset.value)
                        general.write_loss_curve(
                            insured_curve_id, asset, insured_loss_curve)

                # update the event loss table of this task
                for i, asset in enumerate(assets):
                    for j, rupture_id in enumerate(rupture_id_matrix[i]):
                        loss = loss_ratio_matrix[i][j] * asset.value
                        event_loss_table[rupture_id] = (
                            event_loss_table.get(rupture_id, 0) + loss)

                # update the aggregate losses
                aggregate_losses = sum(
                    loss_ratio_matrix[i] * asset.value
                    for i, asset in enumerate(assets))
                general.update_aggregate_losses(
                    aggregate_loss_curve_id, aggregate_losses)

    # compute mean and quantile loss curves if multiple hazard
    # realizations are computed
    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                loss_ratio_curve_matrix = loss_ratio_curves.values()

                # here we are relying on the fact that assets do not
                # change across different logic tree realizations (as
                # the hazard grid does not change, so the hazard
                # getters always returns the same assets)
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        loss_ratio_curve_matrix[i],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="image")

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings),
                              event_loss_table=event_loss_table)
Code example #4
Score: 0
File: core.py — Project: xpb/oq-engine
def classical(job_id, hazard, vulnerability_function, output_containers,
              lrem_steps_per_interval, conditional_loss_poes,
              hazard_montecarlo_p):
    """
    Celery task for the classical risk calculator.

    Instantiates risklib calculators, computes losses for the given
    assets and stores the results to db in a single transaction.

    :param int job_id:
      ID of the currently running job
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'hazard_curve') to a tuple where the first element is an instance of
      :class:`..hazard_getters.HazardCurveGetter`, and the second element is
      the corresponding weight.
    :param vulnerability_function:
      the vulnerability function used by the classical calculator
    :param dict output_containers: A dictionary mapping hazard
      Output ID to a tuple (a, b) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used to
      store the computed loss curves and b is a dictionary that maps poe to ID
      of the :class:`openquake.engine.db.models.LossMap` used to store
      the loss maps
    :param int lrem_steps_per_interval:
      Steps per interval used to compute the Loss Ratio Exceedance matrix
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool hazard_montecarlo_p:
      (meaningful only if curve statistics are computed) whether or not
      the hazard calculation is montecarlo based
    """

    outputs_per_hazard = OrderedDict()

    classical_calculator = api.Classical(
        vulnerability_function, lrem_steps_per_interval)

    for hazard_id, (getter, _weight) in hazard.items():
        # the weight of the hazard output is only needed later, for the
        # curve statistics
        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids) = (
             output_containers[hazard_id])

        with logs.tracing('getting hazard'):
            assets, hazard_curves, missings = getter()

        with logs.tracing('computing risk over %d assets' % len(assets)):
            outputs_per_hazard[hazard_id] = classical_calculator(
                hazard_curves)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for asset, ratio_curve in zip(
                        assets, outputs_per_hazard[hazard_id]):
                    # loss curve first, then the conditional loss maps
                    general.write_loss_curve(
                        loss_curve_id, asset, ratio_curve)

                    for poe in conditional_loss_poes:
                        general.write_loss_map_data(
                            loss_map_ids[poe], asset,
                            scientific.conditional_loss_ratio(
                                ratio_curve, poe))

    # mean/quantile statistics only make sense across multiple hazard
    # realizations
    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with transaction.commit_on_success(using='reslt_writer'):
                curve_matrix = outputs_per_hazard.values()
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        curve_matrix[i],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="support")

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings))
Code example #5
Score: 0
File: core.py — Project: xpb/oq-engine
def scenario(job_id, hazard, seed, vulnerability_function, output_containers,
             insured_losses, asset_correlation):
    """
    Celery task for the scenario risk calculator.

    :param job_id: the id of the current
    :class:`openquake.engine.db.models.OqJob`
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmfscenario') to a tuple where the first element is an instance of
      :class:`..hazard_getters.GroundMotionScenarioGetter`, and the second
      element is the corresponding weight.
    :param seed: the seed used to initialize the rng
    :param vulnerability_function: the vulnerability function used to
      compute the loss ratio matrix from the ground motion values
    :param output_containers: a dictionary {hazard_id: output_id}
        where output id represents the id of the loss map
    :param bool insured_losses: True if also insured losses should be computed
    :param asset_correlation: asset correlation coefficient
    """

    calc = api.Scenario(vulnerability_function, seed, asset_correlation)

    # There is exactly one hazard output (no hazard logic tree support)
    hazard_getter = hazard.values()[0][0]

    with EnginePerformanceMonitor('hazard_getter', job_id, scenario):
        assets, ground_motion_values, missings = hazard_getter()

    # Nothing to compute: signal completion early.
    # NOTE(review): this path reports insured_aggregate_losses=None while
    # the disabled-insured-losses branch at the bottom reports the string
    # "Not computed" -- confirm the receiver accepts both sentinels.
    if not len(assets):
        logs.LOG.info("Exit from task as no asset could be processed")
        base.signal_task_complete(job_id=job_id,
                                  aggregate_losses=None,
                                  insured_aggregate_losses=None,
                                  num_items=len(missings))
        return

    with logs.tracing('computing risk'):
        loss_ratio_matrix = calc(ground_motion_values)

        if insured_losses:
            # one insured loss vector per asset; presumably in absolute
            # loss units, since they are divided by asset.value below --
            # TODO confirm against scientific.insured_losses
            insured_loss_matrix = [
                scientific.insured_losses(
                    loss_ratio_matrix[i], asset.value,
                    asset.deductible, asset.ins_limit)
                for i, asset in enumerate(assets)]

    # There is only one output container list as there is no support
    # for hazard logic tree
    output_containers = output_containers.values()[0]

    loss_map_id = output_containers[0]

    if insured_losses:
        insured_loss_map_id = output_containers[1]

    with db.transaction.commit_on_success(using='reslt_writer'):
        for i, asset in enumerate(assets):
            general.write_loss_map_data(
                loss_map_id, asset,
                loss_ratio_matrix[i].mean(),
                std_dev=loss_ratio_matrix[i].std(ddof=1))

            if insured_losses:
                # mean/std scaled back by asset.value before writing
                general.write_loss_map_data(
                    insured_loss_map_id, asset,
                    insured_loss_matrix[i].mean() / asset.value,
                    std_dev=insured_loss_matrix[i].std(ddof=1) / asset.value)

    # Elementwise sum over assets of the value-scaled loss vectors
    aggregate_losses = sum(loss_ratio_matrix[i] * asset.value
                           for i, asset in enumerate(assets))

    if insured_losses:
        # sum the per-asset insured losses across assets, one total per
        # ground motion realization
        insured_aggregate_losses = (
            numpy.array(insured_loss_matrix).transpose().sum(axis=1))
    else:
        insured_aggregate_losses = "Not computed"

    base.signal_task_complete(
        job_id=job_id,
        num_items=len(assets) + len(missings),
        aggregate_losses=aggregate_losses,
        insured_aggregate_losses=insured_aggregate_losses)