Example #1
    def pre_execute(self):
        """
        In this phase, the general workflow is:
            1. Parse the exposure to get the taxonomies
            2. Parse the available risk models
            3. Initialize progress counters
            4. Validate exposure and risk models
        """
        with logs.tracing('get exposure'):
            self.taxonomies_asset_count = ((
                self.rc.preloaded_exposure_model or loaders.exposure(
                    self.job, self.rc.inputs['exposure'])).taxonomies_in(
                        self.rc.region_constraint))

        with logs.tracing('parse risk models'):
            self.risk_models = self.get_risk_models()

            # consider only the taxonomies in the risk models if
            # taxonomies_from_model has been set to True in the
            # job.ini
            if self.rc.taxonomies_from_model:
                self.taxonomies_asset_count = dict(
                    (t, count)
                    for t, count in self.taxonomies_asset_count.items()
                    if t in self.risk_models)

        self._initialize_progress(sum(self.taxonomies_asset_count.values()))

        for validator_class in self.validators:
            validator = validator_class(self)
            error = validator.get_error()
            if error:
                raise ValueError("""Problems in calculator configuration:
                                 %s""" % error)
Example #2
    def pre_execute(self):
        """
        In this phase, the general workflow is:
            1. Parse the exposure to get the taxonomies
            2. Parse the available risk models
            3. Initialize progress counters
            4. Validate exposure and risk models
        """
        with logs.tracing('get exposure'):
            self.taxonomies_asset_count = \
                (self.rc.preloaded_exposure_model or loaders.exposure(
                    self.job, self.rc.inputs['exposure'])
                 ).taxonomies_in(self.rc.region_constraint)

        with logs.tracing('parse risk models'):
            self.risk_models = self.get_risk_models()

            # consider only the taxonomies in the risk models if
            # taxonomies_from_model has been set to True in the
            # job.ini
            if self.rc.taxonomies_from_model:
                self.taxonomies_asset_count = dict(
                    (t, count)
                    for t, count in self.taxonomies_asset_count.items()
                    if t in self.risk_models)

        for validator_class in self.validators:
            validator = validator_class(self)
            error = validator.get_error()
            if error:
                raise ValueError("""Problems in calculator configuration:
                                 %s""" % error)
Example #3
def classical_bcr(job_id, hazard, vulnerability_function,
                  vulnerability_function_retrofitted,
                  output_containers, lrem_steps_per_interval,
                  asset_life_expectancy, interest_rate):
    """
    Celery task for the BCR risk calculator based on the classical
    calculator.

    Instantiates risklib calculators, computes BCR and stores the
    results to db in a single transaction.

    :param int job_id:
      ID of the currently running job
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'hazard_curve') to a tuple where the first element is an instance of
      :class:`..hazard_getters.HazardCurveGetter`, and the second element is the
      corresponding weight.
    :param output_containers: A dictionary mapping hazard Output ID to
      a tuple with only the ID of the
      :class:`openquake.engine.db.models.BCRDistribution` output container
      used to store the computed bcr distribution
    :param int lrem_steps_per_interval:
      Steps per interval used to compute the Loss Ratio Exceedance matrix
    :param float interest_rate:
      The interest rate used in the Cost Benefit Analysis
    :param float asset_life_expectancy:
      The life expectancy used for every asset
    """

    calc_original = api.Classical(
        vulnerability_function, lrem_steps_per_interval)
    calc_retrofitted = api.Classical(
        vulnerability_function_retrofitted, lrem_steps_per_interval)

    for hazard_output_id, hazard_data in hazard.items():
        hazard_getter, _ = hazard_data
        (bcr_distribution_id,) = output_containers[hazard_output_id]

        calculator = api.BCR(
            calc_original,
            calc_retrofitted,
            interest_rate,
            asset_life_expectancy)

        with logs.tracing('getting hazard'):
            assets, hazard_curves, missings = hazard_getter()

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs = calculator(assets, hazard_curves)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(asset_outputs):
                    general.write_bcr_distribution(
                        bcr_distribution_id, assets[i], asset_output)
    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings))
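
The task above delegates the actual benefit-cost computation to the risklib calculators. The standard benefit-cost ratio divides the discounted reduction in expected annual loss (EAL) by the retrofitting cost; a sketch of that formula, assuming continuous discounting over the asset life (the exact signature used by risklib may differ):

import math

def bcr(eal_original, eal_retrofitted, interest_rate,
        asset_life_expectancy, asset_value, retrofitting_cost):
    # present value of one unit of annual benefit, with continuous
    # discounting at `interest_rate` over the asset life expectancy
    annuity = (1 - math.exp(-interest_rate * asset_life_expectancy)) \
        / interest_rate
    benefit = (eal_original - eal_retrofitted) * asset_value * annuity
    return benefit / retrofitting_cost

A ratio above 1 means the retrofitting pays for itself over the asset's life expectancy.
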
Example #4
def classical_bcr(job_id, assets, hazard_getter_name, hazard,
                  vulnerability_function, vulnerability_function_retrofitted,
                  output_containers, lrem_steps_per_interval,
                  asset_life_expectancy, interest_rate):
    """
    Celery task for the BCR risk calculator based on the classical
    calculator.

    Instantiates risklib calculators, computes BCR and stores the
    results to db in a single transaction.

    :param int job_id:
      ID of the currently running job
    :param assets:
      list of Assets to take into account
    :param str hazard_getter_name: class name of a class defined in the
      :mod:`openquake.engine.calculators.risk.hazard_getters`
      to be instantiated to get the hazard curves
    :param dict hazard:
      A dictionary mapping hazard Output ID to HazardCurve ID
    :param output_containers: A dictionary mapping hazard Output ID to
      a tuple with only the ID of the
      :class:`openquake.engine.db.models.BCRDistribution` output container
      used to store the computed bcr distribution
    :param int lrem_steps_per_interval:
      Steps per interval used to compute the Loss Ratio Exceedance matrix
    :param float interest_rate:
      The interest rate used in the Cost Benefit Analysis
    :param float asset_life_expectancy:
      The life expectancy used for every asset
    """

    for hazard_output_id, hazard_data in hazard.items():
        hazard_id, _ = hazard_data
        (bcr_distribution_id,) = output_containers[hazard_output_id]

        hazard_getter = general.hazard_getter(hazard_getter_name, hazard_id)

        calculator = api.BCR(
            api.Classical(vulnerability_function, lrem_steps_per_interval),
            api.Classical(vulnerability_function_retrofitted,
                          lrem_steps_per_interval),
            interest_rate,
            asset_life_expectancy)

        with logs.tracing('getting hazard'):
            hazard_curves = [hazard_getter(asset.site) for asset in assets]

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs = calculator(assets, hazard_curves)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(asset_outputs):
                    general.write_bcr_distribution(
                        bcr_distribution_id, assets[i], asset_output)
    base.signal_task_complete(job_id=job_id, num_items=len(assets))
Example #5
    def pre_execute(self):
        """
        In this phase, the general workflow is

        1. Parse the exposure input and store the exposure data (if
        not already present)

        2. Check if the exposure filtered with region_constraint is
        not empty

        3. Parse the risk models

        4. Initialize progress counters

        5. Initialize random number generator
        """

        # reload the risk calculation to avoid getting raw string
        # values instead of arrays
        self.job.risk_calculation = models.RiskCalculation.objects.get(
            pk=self.rc.pk)

        with logs.tracing('store exposure'):
            if self.rc.exposure_input is None:
                queryset = self.rc.inputs.filter(input_type='exposure')

                if queryset.exists():
                    self._store_exposure(queryset.all()[0])
                else:
                    raise RuntimeError("No exposure model given in input")

            self.taxonomies = self.rc.exposure_model.taxonomies_in(
                self.rc.region_constraint)

            if not sum(self.taxonomies.values()):
                raise RuntimeError(
                    'Region of interest is not covered by the exposure '
                    'input. This configuration is invalid. Change the '
                    'region constraint input or use a proper exposure file')

        with logs.tracing('store risk model'):
            self.set_risk_models()

        imts = self.hc.get_imts()

        if self.imt not in imts:
            raise RuntimeError(
                "There is no hazard output for the intensity measure %s; "
                "the available IMTs are %s" % (self.imt, imts))

        self._initialize_progress(sum(self.taxonomies.values()))

        self.rnd = random.Random()
        self.rnd.seed(self.rc.master_seed)
Example #6
    def get_taxonomies(self):
        """
          Parse the exposure input and store the exposure data (if not
          already present). Then, check if the exposure filtered with
          region_constraint is not empty.

          :returns:
              a dictionary mapping taxonomy string to the number of
              assets in that taxonomy
        """
        # if we are not going to use a preloaded exposure, we need to
        # parse and store the exposure from the given xml file
        if self.rc.exposure_input is None:
            queryset = self.rc.inputs.filter(input_type='exposure')
            if queryset.exists():
                with logs.tracing('store exposure'):
                    exposure = self._store_exposure(queryset.all()[0])
            else:
                raise ValueError("No exposure model given in input")
        else:  # exposure has been preloaded. Get it from the rc
            exposure = self.rc.exposure_model

        taxonomies = exposure.taxonomies_in(self.rc.region_constraint)

        if not sum(taxonomies.values()):
            raise ValueError(
                'Region of interest is not covered by the exposure '
                'input. This configuration is invalid. Change the '
                'region constraint input or use a proper exposure file')
        return taxonomies
Example #7
    def __init__(self, operation, job_id, task=None, tracing=False,
                 profile_pymem=True, profile_pgmem=False, flush=False):
        self.operation = operation
        self.job_id = job_id
        if task:
            self.task = task
            self.task_id = task.request.id
        else:
            self.task = None
            self.task_id = None
        self.tracing = tracing
        self.profile_pymem = profile_pymem
        self.profile_pgmem = profile_pgmem
        self.flush = flush
        if self.profile_pymem and self.pypid is None:
            self.__class__.pypid = os.getpid()
        if self.profile_pgmem and self.pgpid is None:
            # this may be slow
            pgpid = connections['job_init'].cursor().\
                connection.get_backend_pid()
            try:
                psutil.Process(pgpid)
            except psutil.error.NoSuchProcess:  # db on a different machine
                pass
            else:
                self.__class__.pgpid = pgpid
        if tracing:
            self.tracer = logs.tracing(operation)

        super(EnginePerformanceMonitor, self).__init__(
            [self.pypid, self.pgpid])
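
Like `logs.tracing`, the monitor is meant to be used as a context manager around expensive operations, with the base class presumably implementing `__enter__`/`__exit__` over the given pids. A stripped-down, time-only sketch of the idea (the real class also samples Python and Postgres memory via psutil):

import time

class MonitorSketch(object):
    # minimal stand-in for EnginePerformanceMonitor: measures only
    # wall-clock time, not memory
    def __init__(self, operation):
        self.operation = operation

    def __enter__(self):
        self.t0 = time.time()
        return self

    def __exit__(self, etype, exc, tb):
        self.duration = time.time() - self.t0
        print('%s took %.2fs' % (self.operation, self.duration))
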
Example #8
    def pre_execute(self):
        """
        In this phase, the general workflow is:
            1. Parse and validate the exposure to get the taxonomies
            2. Parse and validate the available risk models
            3. Validate the given hazard
            4. Initialize progress counters
            5. Update the job stats
        """

        # reload the risk calculation to avoid getting raw string
        # values instead of arrays
        self.job.risk_calculation = models.RiskCalculation.objects.get(
            pk=self.rc.pk)

        self.taxonomies = self.get_taxonomies()

        self.validate_hazard()

        with logs.tracing('parse risk models'):
            self.risk_models = self.get_risk_models()
            self.check_taxonomies(self.risk_models)
            self.check_imts(required_imts(self.risk_models))

        assets_num = sum(self.taxonomies.values())
        self._initialize_progress(assets_num)
Example #9
    def export(self, *args, **kwargs):
        """
        If requested by the user, automatically export all result artifacts to
        the specified format. (NOTE: the only export format supported at the
        moment is NRML XML.)

        :param exports:
            Keyword arg. List of export types.
        :returns:
            A list of the export filenames, including the absolute path to each
            file.
        """
        exported_files = []

        with logs.tracing('exports'):
            export_dir = self.job.get_param('export_dir')
            export_type = kwargs['exports']
            if export_type:
                outputs = self._get_outputs_for_export()
                for output in outputs:
                    with self.monitor('exporting %s to %s' %
                                      (output.output_type, export_type)):
                        fname = core.export(output.id, export_dir, export_type)
                        if fname:
                            logs.LOG.info('exported %s', fname)
                            exported_files.append(fname)

        return exported_files
Example #10
def scenario_damage(job_id, assets, hazard_getter, hazard,
                    taxonomy, fragility_model, fragility_functions,
                    output_containers, imt):
    """
    Celery task for the scenario damage risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param assets: the list of :class:`openquake.risklib.scientific.Asset`
        instances considered
    :param hazard_getter: the name of a hazard getter to be used
    :param hazard: the hazard output dictionary
    :param taxonomy: the taxonomy being considered
    :param fragility_model: a
        :class:`openquake.risklib.models.input.FragilityModel` object
    :param fragility_functions: a
        :class:`openquake.risklib.models.input.FragilityFunctionSeq` object
    :param output_containers: a dictionary {hazard_id: output_id}
        of output_type "dmg_dist_per_asset"
    :param imt: the Intensity Measure Type of the ground motion field
    """
    calculator = api.ScenarioDamage(fragility_model, fragility_functions)
    for hazard_id in hazard:
        # do not rebind `hazard_getter`: the getter *name* is needed on
        # every iteration of the loop
        getter = general.hazard_getter(hazard_getter, hazard_id, imt)
        outputs = calculator(assets, [getter(a.site) for a in assets])
        with logs.tracing('save statistics per site'), \
                db.transaction.commit_on_success(using='reslt_writer'):
            rc_id = models.OqJob.objects.get(id=job_id).risk_calculation.id
            for output in outputs:
                save_dist_per_asset(output.fractions, rc_id, output.asset)

    # send aggregate fractions to the controller, the hook will collect them
    aggfractions = sum(o.fractions for o in outputs)
    base.signal_task_complete(job_id=job_id, num_items=len(assets),
                              fractions=aggfractions, taxonomy=taxonomy)
Example #11
    def export(self, *args, **kwargs):
        """
        If requested by the user, automatically export all result artifacts to
        the specified format. (NOTE: the only export format supported at the
        moment is NRML XML.)

        :param exports:
            Keyword arg. List of export types.
        :returns:
            A list of the export filenames, including the absolute path to each
            file.
        """
        exported_files = []

        with logs.tracing('exports'):
            if 'exports' in kwargs:
                outputs = self._get_outputs_for_export()

                for export_type in kwargs['exports']:
                    for output in outputs:
                        with self.monitor('exporting %s to %s'
                                          % (output.output_type, export_type)):
                            fname = self._do_export(
                                output.id,
                                self.job.calculation.export_dir,
                                export_type
                            )
                            exported_files.append(fname)

        return exported_files
Example #12
def do_classical_bcr(loss_type, units, containers, params, profile):
    for unit_orig, unit_retro in utils.pairwise(units):
        with profile('getting hazard'):
            assets, hazard_curves = unit_orig.getter()
            _, hazard_curves_retrofitted = unit_retro.getter()

        with profile('computing bcr'):
            original_loss_curves = unit_orig.calc(hazard_curves)
            retrofitted_loss_curves = unit_retro.calc(
                hazard_curves_retrofitted)

            eal_original = [
                scientific.average_loss(losses, poes)
                for losses, poes in original_loss_curves]

            eal_retrofitted = [
                scientific.average_loss(losses, poes)
                for losses, poes in retrofitted_loss_curves]

            bcr_results = [
                scientific.bcr(
                    eal_original[i], eal_retrofitted[i],
                    params.interest_rate, params.asset_life_expectancy,
                    asset.value(loss_type), asset.retrofitted(loss_type))
                for i, asset in enumerate(assets)]

        with logs.tracing('writing results'):
            containers.write(
                assets, zip(eal_original, eal_retrofitted, bcr_results),
                output_type="bcr_distribution",
                loss_type=loss_type,
                hazard_output_id=unit_orig.getter.hazard_output.id)
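
`utils.pairwise` couples each original calculation unit with its retrofitted counterpart. Assuming it follows the classic itertools recipe of consecutive overlapping pairs, a sketch:

from itertools import tee

def pairwise(iterable):
    # s -> (s0, s1), (s1, s2), (s2, s3), ...
    # with exactly two units (original, retrofitted) this yields a
    # single pair, which is how do_classical_bcr consumes it
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)
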
Example #13
File: core.py Project: 4x/oq-engine
def _update_curves(hc, matrices, lt_rlz, src_ids):
    """
    Helper function for updating source, hazard curve, and realization progress
    records in the database.

    This is intended to be used by :func:`compute_hazard_curves`.

    :param hc:
        :class:`openquake.engine.db.models.HazardCalculation` instance.
    :param lt_rlz:
        :class:`openquake.engine.db.models.LtRealization` record for the
        current realization.
    :param src_ids:
        List of source IDs considered for this calculation task.
    """
    with logs.tracing('_update_curves for all IMTs'):
        for imt in hc.intensity_measure_types_and_levels.keys():
            with transaction.commit_on_success():
                logs.LOG.debug('> updating hazard for IMT=%s' % imt)
                hazardlib_imt = haz_general.imt_to_hazardlib(imt)
                query = """
                SELECT * FROM htemp.hazard_curve_progress
                WHERE lt_realization_id = %s
                AND imt = %s
                FOR UPDATE"""
                [hc_progress] = models.HazardCurveProgress.objects.raw(
                    query, [lt_rlz.id, imt])

                hc_progress.result_matrix = update_result_matrix(
                    hc_progress.result_matrix, matrices[hazardlib_imt])
                hc_progress.save()

                logs.LOG.debug('< done updating hazard for IMT=%s' % imt)

        with transaction.commit_on_success():
            # Check here if any of records in source progress model
            # with parsed_source_id from src_ids are marked as complete,
            # and rollback and abort if there is at least one
            src_prog = models.SourceProgress.objects.filter(
                lt_realization=lt_rlz, parsed_source__in=src_ids)

            if any(x.is_complete for x in src_prog):
                msg = (
                    'One or more `source_progress` records were marked as '
                    'complete. This was unexpected and probably means that the'
                    ' calculation workload was not distributed properly.'
                )
                logs.LOG.critical(msg)
                transaction.rollback()
                raise RuntimeError(msg)

            # Mark source_progress records as complete
            src_prog.update(is_complete=True)

            # Update realization progress,
            # mark realization as complete if it is done
            haz_general.update_realization(lt_rlz.id, len(src_ids))
Example #14
def gmfs(job_id, sites, rupture_id, output_id, task_seed, realizations):
    """
    A celery task wrapper function around :func:`compute_gmfs`.
    See :func:`compute_gmfs` for parameter definitions.
    """
    with logs.tracing('computing gmfs'):
        numpy.random.seed(task_seed)
        compute_gmfs(job_id, sites, rupture_id, output_id, realizations)
        base.signal_task_complete(job_id=job_id, num_items=len(sites))
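
Seeding numpy's global RNG with a per-task seed makes each task's ground motion fields reproducible regardless of which worker executes it or in which order the tasks run, provided the task_seed values are distinct. A tiny self-contained illustration:

import numpy

def draw(task_seed, n=3):
    # the same seed always yields the same pseudo-random sequence
    numpy.random.seed(task_seed)
    return numpy.random.random(n)

assert (draw(42) == draw(42)).all()
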
Example #15
    def __init__(self, operation, job_id=None, task=None, tracing=False,
                 measuremem=True, autoflush=False):
        super(EnginePerformanceMonitor, self).__init__(
            operation, autoflush=autoflush, measuremem=measuremem)
        self.job_id = job_id
        if task:
            self.task = task
        else:
            self.task = None
        self.tracing = tracing
        if tracing:
            self.tracer = logs.tracing(operation)
Example #16
    def task_arg_gen(self, block_size):
        """
        Generator function for creating the arguments for each task.

        It is responsible for the distribution strategy. It divides
        the considered exposure into chunks of homogeneous assets
        (i.e. having the same taxonomy). The chunk size is given by
        the `block_size` openquake config parameter

        :param int block_size:
            The number of work items per task (sources, sites, etc.).

        :returns:
            An iterator over a list of arguments. Each contains:

            1. the job id
            2. a getter object needed to get the hazard data
            3. the needed risklib calculators
            4. the output containers to be populated
            5. the specific calculator parameter set
        """
        output_containers = writers.combine_builders(
            [builder(self) for builder in self.output_builders])

        num_tasks = 0
        for taxonomy, assets_nr in self.taxonomies_asset_count.items():
            asset_offsets = range(0, assets_nr, block_size)

            for offset in asset_offsets:
                with logs.tracing("getting assets"):
                    assets = models.ExposureData.objects.get_asset_chunk(
                        self.rc, taxonomy, offset, block_size)

                calculation_units = [
                    self.calculation_unit(loss_type, assets)
                    for loss_type in models.loss_types(self.risk_models)
                ]

                num_tasks += 1
                yield [
                    self.job.id, calculation_units, output_containers,
                    self.calculator_parameters
                ]

        # sanity check to protect against future changes of the distribution
        # logic
        expected_tasks = self.expected_tasks(block_size)
        if num_tasks != expected_tasks:
            raise RuntimeError('Expected %d tasks, generated %d!' %
                               (expected_tasks, num_tasks))
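
The distribution strategy reduces to `range(0, assets_nr, block_size)`: each offset marks the start of a homogeneous block of at most block_size assets of one taxonomy. A self-contained sketch (the taxonomy counts are made up):

def block_offsets(taxonomies_asset_count, block_size):
    # yield (taxonomy, offset) pairs covering every asset exactly once
    for taxonomy, assets_nr in taxonomies_asset_count.items():
        for offset in range(0, assets_nr, block_size):
            yield taxonomy, offset

# 5 'W' assets with block_size=2 produce offsets 0, 2, 4, i.e. blocks
# of sizes 2, 2 and 1
print(list(block_offsets({'RC': 3, 'W': 5}, 2)))
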
Example #17
    def __init__(self, operation, job_id, task=None, tracing=False,
                 measuremem=True, autoflush=False):
        self.measuremem = measuremem
        pid = os.getpid() if measuremem else None
        super(EnginePerformanceMonitor, self).__init__(
            operation, pid, autoflush=autoflush)
        self.job_id = job_id
        if task:
            self.task = task
        else:
            self.task = None
        self.tracing = tracing
        if tracing:
            self.tracer = logs.tracing(operation)
Example #18
    def task_arg_gen(self, block_size):
        """
        Generator function for creating the arguments for each task.

        It is responsible for the distribution strategy. It divides
        the considered exposure into chunks of homogeneous assets
        (i.e. having the same taxonomy). The chunk size is given by
        the `block_size` openquake config parameter

        :param int block_size:
            The number of work items per task (sources, sites, etc.).

        :returns:
            An iterator over a list of arguments. Each contains:

            1. the job id
            2. a getter object needed to get the hazard data
            3. the needed risklib calculators
            4. the output containers to be populated
            5. the specific calculator parameter set
        """
        output_containers = writers.combine_builders(
            [builder(self) for builder in self.output_builders])

        num_tasks = 0
        for taxonomy, assets_nr in self.taxonomies_asset_count.items():
            asset_offsets = range(0, assets_nr, block_size)

            for offset in asset_offsets:
                with logs.tracing("getting assets"):
                    assets = models.ExposureData.objects.get_asset_chunk(
                        self.rc, taxonomy, offset, block_size)

                calculation_units = [
                    self.calculation_unit(loss_type, assets)
                    for loss_type in models.loss_types(self.risk_models)]

                num_tasks += 1
                yield [self.job.id,
                       calculation_units,
                       output_containers,
                       self.calculator_parameters]

        # sanity check to protect against future changes of the distribution
        # logic
        expected_tasks = self.expected_tasks(block_size)
        if num_tasks != expected_tasks:
            raise RuntimeError('Expected %d tasks, generated %d!' % (
                               expected_tasks, num_tasks))
Example #19
def scenario_damage(job_id, hazard,
                    taxonomy, fragility_functions,
                    _output_containers):
    """
    Celery task for the scenario damage risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmfscenario') to a tuple where the first element is an instance of
      :class:`..hazard_getters.GroundMotionScenarioGetter`, and the second
      element is the corresponding weight.
    :param taxonomy: the taxonomy being considered
    :param fragility_functions: a
        :class:`openquake.risklib.models.input.FragilityFunctionSequence`
        object
    :param _output_containers: a dictionary {hazard_id: output_id}
        of output_type "dmg_dist_per_asset"
    """
    calculator = api.ScenarioDamage(fragility_functions)

    # Scenario Damage works only on one hazard
    hazard_getter = hazard.values()[0][0]

    assets, ground_motion_values, missings = hazard_getter()

    if not len(assets):
        logs.LOG.warn("Exit from task as no asset could be processed")
        base.signal_task_complete(
            job_id=job_id, fractions=None,
            num_items=len(missings), taxonomy=taxonomy)
        return

    fraction_matrix = calculator(ground_motion_values)

    with logs.tracing('save statistics per site'), \
            db.transaction.commit_on_success(using='reslt_writer'):
        rc_id = models.OqJob.objects.get(id=job_id).risk_calculation.id
        for i, asset in enumerate(assets):
            save_dist_per_asset(
                fraction_matrix[i] * asset.number_of_units, rc_id, asset)

    # send aggregate fractions to the controller, the hook will collect them
    aggfractions = sum(fraction_matrix[i] * asset.number_of_units
                       for i, asset in enumerate(assets))
    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings),
                              fractions=aggfractions, taxonomy=taxonomy)
Example #20
    def parse_risk_model(self):
        """
        If any risk model is given in the hazard calculation, the
        computation will be driven by risk data. In this case the
        locations will be extracted from the exposure file (if there
        is one) and the imt (and levels) will be extracted from the
        vulnerability model (if there is one)
        """
        oqparam = self.job.get_oqparam()
        if 'exposure' in oqparam.inputs:
            with logs.tracing('storing exposure'):
                exposure.ExposureDBWriter(self.job).serialize(
                    risk_parsers.ExposureModelParser(
                        oqparam.inputs['exposure']))
        models.Imt.save_new(map(from_string, oqparam.imtls))
Example #21
    def parse_risk_model(self):
        """
        If any risk model is given in the hazard calculation, the
        computation will be driven by risk data. In this case the
        locations will be extracted from the exposure file (if there
        is one) and the imt (and levels) will be extracted from the
        vulnerability model (if there is one)
        """
        oqparam = self.job.get_oqparam()
        if 'exposure' in oqparam.inputs:
            with logs.tracing('storing exposure'):
                exposure.ExposureDBWriter(
                    self.job).serialize(
                        risk_parsers.ExposureModelParser(
                            oqparam.inputs['exposure']))
        models.Imt.save_new(map(from_string, oqparam.imtls))
Example #22
    def _store_exposure(self):
        """Load exposure assets and write them to database."""
        [exposure_model_input] = models.inputs4rcalc(
            self.rc, input_type='exposure')

        # If this was an existing model, it was already parsed and should be in
        # the DB.
        if not self.rc.force_inputs and models.ExposureModel.objects.filter(
                input=exposure_model_input).exists():
            return exposure_model_input.exposuremodel

        with logs.tracing('storing exposure'):
            path = os.path.join(self.rc.base_path, exposure_model_input.path)
            exposure_stream = parsers.ExposureModelParser(path)
            w = exposure_writer.ExposureDBWriter(exposure_model_input)
            w.serialize(exposure_stream)
        return w.model
Example #23
    def task_arg_gen(self, block_size):
        """
        Generator function for creating the arguments for each task.

        It is responsible for the distribution strategy. It divides
        the considered exposure into chunks of homogeneous assets
        (i.e. having the same taxonomy). The chunk size is given by
        the `block_size` openquake config parameter

        :param int block_size:
            The number of work items per task (sources, sites, etc.).

        :returns:
            An iterator over a list of arguments. Each contains:

            1. the job id
            2. the exposure subset on which the celery task is applied
            3. the hazard getter and the hazard_id to be used
            4. a seed (eventually generated from a master seed)
            5. the output containers to be populated
            6. the specific calculator parameter set
        """

        output_containers = self.rc.output_container_builder(self)

        calculator_parameters = self.calculator_parameters

        for taxonomy, assets_nr in self.taxonomies.items():
            asset_offsets = range(0, assets_nr, block_size)

            for offset in asset_offsets:
                with logs.tracing("getting assets"):
                    assets = self.exposure_model.get_asset_chunk(
                        taxonomy,
                        self.rc.region_constraint, offset, block_size)

                hazard = dict((ho.id, self.hazard_output(ho))
                              for ho in self.considered_hazard_outputs())

                # FIXME(lp). Refactor the following arg list such that
                # the arguments are grouped into namedtuples
                yield ([
                    self.job.id,
                    assets,
                    self.hazard_getter, hazard] +
                    self.worker_args(taxonomy) +
                    [output_containers] +
                    calculator_parameters)
Example #24
    def export(self, *args, **kwargs):
        """
        If requested by the user, automatically export all result artifacts.

        :returns: A list of the export filenames, including the
            absolute path to each file.
        """

        exported_files = []
        with logs.tracing('exports'):
            if 'exports' in kwargs and kwargs['exports']:
                exported_files = sum([
                    export.risk.export(output.id, self.rc.export_dir)
                    for output in export.core.get_outputs(self.job.id)], [])

                for exp_file in exported_files:
                    logs.LOG.debug('exported %s' % exp_file)
        return exported_files
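
The `sum(list_of_lists, [])` call flattens the per-output lists of filenames into one list; `itertools.chain` is the usual alternative for large inputs, since repeated list addition is quadratic. A quick equivalence check:

from itertools import chain

nested = [['a.xml'], ['b.xml', 'c.xml']]
assert sum(nested, []) == list(chain.from_iterable(nested))
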
Example #25
    def _store_exposure(self, exposure_model_input):
        """
        Load exposure assets and write them to database.

        :param exposure_model_input: a
            :class:`openquake.engine.db.models.Input` object with input
            type `exposure`
        """

        # If this was an existing model, it was already parsed and should be in
        # the DB.
        if not self.rc.force_inputs and models.ExposureModel.objects.filter(
                input=exposure_model_input).exists():
            return exposure_model_input.exposuremodel

        with logs.tracing('storing exposure'):
            path = os.path.join(self.rc.base_path, exposure_model_input.path)
            exposure_stream = parsers.ExposureModelParser(path)
            w = exposure_writer.ExposureDBWriter(exposure_model_input)
            w.serialize(exposure_stream)
        return w.model
Example #26
def _update_curves(hc, matrices, lt_rlz):
    """
    Helper function for updating source, hazard curve, and realization progress
    records in the database.

    This is intended to be used by :func:`compute_hazard_curves`.

    :param hc:
        :class:`openquake.engine.db.models.HazardCalculation` instance.
    :param lt_rlz:
        :class:`openquake.engine.db.models.LtRealization` record for the
        current realization.
    """
    with logs.tracing('_update_curves for all IMTs'):
        for imt in hc.intensity_measure_types_and_levels.keys():
            hazardlib_imt = haz_general.imt_to_hazardlib(imt)
            matrix = matrices[hazardlib_imt]
            if (matrix == 0.0).all():
                # The matrix for this IMT is all zeros; there's no reason to
                # update `hazard_curve_progress` records.
                logs.LOG.debug('* No hazard contribution for IMT=%s' % imt)
                continue
            else:
                # There is some contribution to the hazard here; we need
                # to update.
                with transaction.commit_on_success():
                    logs.LOG.debug('> updating hazard for IMT=%s' % imt)
                    query = """
                    SELECT * FROM htemp.hazard_curve_progress
                    WHERE lt_realization_id = %s
                    AND imt = %s
                    FOR UPDATE"""
                    [hc_progress] = models.HazardCurveProgress.objects.raw(
                        query, [lt_rlz.id, imt])

                    hc_progress.result_matrix = update_result_matrix(
                        hc_progress.result_matrix, matrix)
                    hc_progress.save()

                    logs.LOG.debug('< done updating hazard for IMT=%s' % imt)
Example #27
    def __init__(self,
                 operation,
                 job_id,
                 task=None,
                 tracing=False,
                 profile_pymem=True,
                 profile_pgmem=False,
                 flush=False):
        self.operation = operation
        self.job_id = job_id
        if task:
            self.task = task
            self.task_id = task.request.id
        else:
            self.task = None
            self.task_id = None
        self.tracing = tracing
        self.profile_pymem = profile_pymem
        self.profile_pgmem = profile_pgmem
        self.flush = flush
        if self.profile_pymem and self.pypid is None:
            self.__class__.pypid = os.getpid()
        if self.profile_pgmem and self.pgpid is None:
            # this may be slow
            pgpid = connections['job_init'].cursor().\
                connection.get_backend_pid()
            try:
                psutil.Process(pgpid)
            except psutil.error.NoSuchProcess:  # db on a different machine
                pass
            else:
                self.__class__.pgpid = pgpid
        if tracing:
            self.tracer = logs.tracing(operation)

        super(EnginePerformanceMonitor,
              self).__init__([self.pypid, self.pgpid])
Example #28
    def parse_risk_models(self):
        """
        If any risk model is given in the hazard calculation, the
        computation will be driven by risk data. In this case the
        locations will be extracted from the exposure file (if there
        is one) and the imt (and levels) will be extracted from the
        vulnerability model (if there is one)
        """
        hc = self.hc
        if hc.vulnerability_models:
            logs.LOG.progress("parsing risk models")

            hc.intensity_measure_types_and_levels = dict()
            hc.intensity_measure_types = list()

            for vf in hc.vulnerability_models:
                intensity_measure_types_and_levels = dict(
                    (record['IMT'], record['IML']) for record in
                    parsers.VulnerabilityModelParser(vf))

                for imt, levels in \
                        intensity_measure_types_and_levels.items():
                    if (imt in hc.intensity_measure_types_and_levels and
                        (set(hc.intensity_measure_types_and_levels[imt]) -
                         set(levels))):
                        logs.LOG.warning(
                            "The same IMT %s is associated with "
                            "different levels" % imt)
                    else:
                        hc.intensity_measure_types_and_levels[imt] = levels

                hc.intensity_measure_types.extend(
                    intensity_measure_types_and_levels)

            # remove possible duplicates
            if hc.intensity_measure_types is not None:
                hc.intensity_measure_types = list(set(
                    hc.intensity_measure_types))
            hc.save()
            logs.LOG.info("Got IMT and levels "
                          "from vulnerability models: %s - %s" % (
                              hc.intensity_measure_types_and_levels,
                              hc.intensity_measure_types))

        if 'fragility' in hc.inputs:
            hc.intensity_measure_types_and_levels = dict()
            hc.intensity_measure_types = list()

            parser = iter(parsers.FragilityModelParser(
                hc.inputs['fragility']))
            hc = self.hc

            fragility_format, _limit_states = parser.next()

            if (fragility_format == "continuous" and
                    hc.calculation_mode != "scenario"):
                raise NotImplementedError(
                    "Getting IMT and levels from "
                    "a continuous fragility model is not yet supported")

            hc.intensity_measure_types_and_levels = dict(
                (iml['IMT'], iml['imls'])
                for _taxonomy, iml, _params, _no_damage_limit in parser)
            hc.intensity_measure_types.extend(
                hc.intensity_measure_types_and_levels)
            hc.save()

        if 'exposure' in hc.inputs:
            with logs.tracing('storing exposure'):
                exposure.ExposureDBWriter(
                    self.job).serialize(
                    parsers.ExposureModelParser(hc.inputs['exposure']))
Example #29
def _update_curves(hc, matrices, lt_rlz, src_ids):
    """
    Helper function for updating source, hazard curve, and realization progress
    records in the database.

    This is intended to be used by :func:`compute_hazard_curves`.

    :param hc:
        :class:`openquake.engine.db.models.HazardCalculation` instance.
    :param lt_rlz:
        :class:`openquake.engine.db.models.LtRealization` record for the
        current realization.
    :param src_ids:
        List of source IDs considered for this calculation task.
    """
    with logs.tracing('_update_curves for all IMTs'):
        for imt in hc.intensity_measure_types_and_levels.keys():
            hazardlib_imt = haz_general.imt_to_hazardlib(imt)
            matrix = matrices[hazardlib_imt]
            if (matrix == 0.0).all():
                # The matrix for this IMT is all zeros; there's no reason to
                # update `hazard_curve_progress` records.
                logs.LOG.debug('* No hazard contribution for IMT=%s' % imt)
                continue
            else:
                # There is some contribution to the hazard here; we need
                # to update.
                with transaction.commit_on_success():
                    logs.LOG.debug('> updating hazard for IMT=%s' % imt)
                    query = """
                    SELECT * FROM htemp.hazard_curve_progress
                    WHERE lt_realization_id = %s
                    AND imt = %s
                    FOR UPDATE"""
                    [hc_progress] = models.HazardCurveProgress.objects.raw(
                        query, [lt_rlz.id, imt])

                    hc_progress.result_matrix = update_result_matrix(
                        hc_progress.result_matrix, matrix)
                    hc_progress.save()

                    logs.LOG.debug('< done updating hazard for IMT=%s' % imt)

        with transaction.commit_on_success():
            # Check here if any of records in source progress model
            # with parsed_source_id from src_ids are marked as complete,
            # and rollback and abort if there is at least one
            src_prog = models.SourceProgress.objects.filter(
                lt_realization=lt_rlz, parsed_source__in=src_ids)

            if any(x.is_complete for x in src_prog):
                msg = (
                    'One or more `source_progress` records were marked as '
                    'complete. This was unexpected and probably means that the'
                    ' calculation workload was not distributed properly.')
                logs.LOG.critical(msg)
                transaction.rollback()
                raise RuntimeError(msg)

            # Mark source_progress records as complete
            src_prog.update(is_complete=True)

            # Update realization progress,
            # mark realization as complete if it is done
            haz_general.update_realization(lt_rlz.id, len(src_ids))
Example #30
def classical(job_id, assets, hazard_getter_name, hazard,
              vulnerability_function,
              output_containers,
              lrem_steps_per_interval, conditional_loss_poes,
              hazard_montecarlo_p):
    """
    Celery task for the classical risk calculator.

    Instantiates risklib calculators, computes losses for the given
    assets and stores the results to db in a single transaction.

    :param int job_id:
      ID of the currently running job
    :param assets:
      iterator over :class:`openquake.engine.db.models.ExposureData` to take
      into account
    :param str hazard_getter_name: class name of a class defined in the
      :mod:`openquake.engine.calculators.risk.hazard_getters` to be
      instantiated to get the hazard curves
    :param dict hazard:
      A dictionary mapping hazard Output ID to HazardCurve ID
    :param dict output_containers: A dictionary mapping hazard
      Output ID to a tuple (a, b) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used to
      store the computed loss curves and b is a dictionary that maps poe to ID
      of the :class:`openquake.engine.db.models.LossMap` used to store
      the loss maps
    :param int lrem_steps_per_interval:
      Steps per interval used to compute the Loss Ratio Exceedance matrix
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool hazard_montecarlo_p:
     (meaningful only if curve statistics are computed). Whether or not
     the hazard calculation is Monte Carlo based
    """

    asset_outputs = OrderedDict()

    for hazard_output_id, hazard_data in hazard.items():
        hazard_id, _ = hazard_data
        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids) = (
             output_containers[hazard_output_id])

        hazard_getter = general.hazard_getter(hazard_getter_name, hazard_id)

        calculator = api.Classical(
            vulnerability_function, lrem_steps_per_interval)

        # if we need to compute the loss maps, we add the proper risk
        # aggregator
        if conditional_loss_poes:
            calculator = api.ConditionalLosses(
                conditional_loss_poes, calculator)

        with logs.tracing('getting hazard'):
            hazard_curves = [hazard_getter(asset.site) for asset in assets]

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs[hazard_output_id] = calculator(assets, hazard_curves)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(
                        asset_outputs[hazard_output_id]):
                    general.write_loss_curve(
                        loss_curve_id, assets[i], asset_output)

                    if asset_output.conditional_losses:
                        general.write_loss_map(
                            loss_map_ids, assets[i], asset_output)

    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        [asset_output[i].loss_ratio_curve
                         for asset_output in asset_outputs.values()],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="support")

    base.signal_task_complete(job_id=job_id, num_items=len(assets))
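
With more than one hazard output (i.e. several logic-tree realizations), `general.curve_statistics` aggregates each asset's loss ratio curves across realizations using the realization weights. A sketch of the mean-curve step, assuming all curves share the same loss abscissae (which is what `assume_equal="support"` suggests):

import numpy

def mean_curve(curves, weights):
    # curves: sequence of (losses, poes) pairs on a common `losses`
    # support; average the poes using the realization weights
    losses = curves[0][0]
    poes = numpy.average(
        [poes for _losses, poes in curves], axis=0, weights=weights)
    return losses, poes
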
Example #31
def event_based(job_id, assets, hazard_getter_name, hazard,
                seed, vulnerability_function,
                output_containers,
                conditional_loss_poes, insured_losses,
                imt, time_span, tses,
                loss_curve_resolution, asset_correlation,
                hazard_montecarlo_p):
    """
    Celery task for the event based risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param assets: the list of :class:`openquake.risklib.scientific.Asset`
        instances considered
    :param str hazard_getter_name: class name of a class defined in the
      :mod:`openquake.engine.calculators.risk.hazard_getters` to be
      instantiated to get the hazard curves
    :param dict hazard:
      A dictionary mapping hazard Output ID to GmfCollection ID
    :param seed: the seed used to initialize the rng

    :param dict output_containers: a dictionary mapping hazard Output
      ID to a list (a, b, c, d) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used to
      store the computed loss curves; b is the dictionary poe->ID of
      the :class:`openquake.engine.db.models.LossMap` output container used
      to store the computed loss maps; c is the same as a but for
      insured losses; d is the ID of the
      :class:`openquake.engine.db.models.AggregateLossCurve` output container
      used to store the computed loss curves
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool insured_losses: True if insured losses should be computed
    :param str imt: the imt used to filter ground motion fields
    :param time_span: the time span considered
    :param tses: time of the stochastic event set
    :param loss_curve_resolution: the curve resolution, i.e. the
        number of points which defines the loss curves
    :param float asset_correlation: a number ranging from 0 to 1
        representing the correlation between the generated loss ratios
    """

    asset_outputs = OrderedDict()
    for hazard_output_id, hazard_data in hazard.items():
        hazard_id, _ = hazard_data

        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids,
         insured_curve_id, aggregate_loss_curve_id) = (
             output_containers[hazard_output_id])

        hazard_getter = general.hazard_getter(
            hazard_getter_name, hazard_id, imt)

        calculator = api.ProbabilisticEventBased(
            vulnerability_function,
            curve_resolution=loss_curve_resolution,
            time_span=time_span,
            tses=tses,
            seed=seed,
            correlation=asset_correlation)

        if insured_losses:
            calculator = api.InsuredLosses(calculator)

        # if we need to compute the loss maps, we add the proper risk
        # aggregator
        if conditional_loss_poes:
            calculator = api.ConditionalLosses(
                conditional_loss_poes, calculator)

        with logs.tracing('getting hazard'):
            ground_motion_fields = [hazard_getter(asset.site)
                                    for asset in assets]

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs[hazard_output_id] = calculator(
                assets, ground_motion_fields)

        with logs.tracing('writing results'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(
                        asset_outputs[hazard_output_id]):
                    general.write_loss_curve(
                        loss_curve_id, assets[i], asset_output)

                    if asset_output.conditional_losses:
                        general.write_loss_map(
                            loss_map_ids, assets[i], asset_output)

                    if asset_output.insured_losses:
                        general.write_loss_curve(
                            insured_curve_id, assets[i], asset_output)
                losses = sum(asset_output.losses
                             for asset_output
                             in asset_outputs[hazard_output_id])
                general.update_aggregate_losses(
                    aggregate_loss_curve_id, losses)

    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        [asset_output[i].loss_ratio_curve
                         for asset_output in asset_outputs.values()],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="image")

    base.signal_task_complete(job_id=job_id, num_items=len(assets))
Example #32
    def parse_risk_models(self):
        """
        If any risk model is given in the hazard calculation, the
        computation will be driven by risk data. In this case the
        locations will be extracted from the exposure file (if there
        is one) and the imt (and levels) will be extracted from the
        vulnerability model (if there is one)
        """
        hc = self.hc
        if hc.vulnerability_models:
            logs.LOG.progress("parsing risk models")

            hc.intensity_measure_types_and_levels = dict()
            hc.intensity_measure_types = list()

            for vf in hc.vulnerability_models:
                intensity_measure_types_and_levels = dict(
                    (record['IMT'], record['IML'])
                    for record in parsers.VulnerabilityModelParser(vf))

                for imt, levels in \
                        intensity_measure_types_and_levels.items():
                    if (imt in hc.intensity_measure_types_and_levels and
                        (set(hc.intensity_measure_types_and_levels[imt]) -
                         set(levels))):
                        logs.LOG.warning("The same IMT %s is associated with "
                                         "different levels" % imt)
                    else:
                        hc.intensity_measure_types_and_levels[imt] = levels

                hc.intensity_measure_types.extend(
                    intensity_measure_types_and_levels)

            # remove possible duplicates
            if hc.intensity_measure_types is not None:
                hc.intensity_measure_types = list(
                    set(hc.intensity_measure_types))
            hc.save()
            logs.LOG.info("Got IMT and levels "
                          "from vulnerability models: %s - %s" %
                          (hc.intensity_measure_types_and_levels,
                           hc.intensity_measure_types))

        if 'fragility' in hc.inputs:
            hc.intensity_measure_types_and_levels = dict()
            hc.intensity_measure_types = list()

            parser = iter(parsers.FragilityModelParser(hc.inputs['fragility']))
            hc = self.hc

            fragility_format, _limit_states = parser.next()

            if (fragility_format == "continuous"
                    and hc.calculation_mode != "scenario"):
                raise NotImplementedError(
                    "Getting IMT and levels from "
                    "a continuous fragility model is not yet supported")

            hc.intensity_measure_types_and_levels = dict(
                (iml['IMT'], iml['imls'])
                for _taxonomy, iml, _params, _no_damage_limit in parser)
            hc.intensity_measure_types.extend(
                hc.intensity_measure_types_and_levels)
            hc.save()

        if 'exposure' in hc.inputs:
            with logs.tracing('storing exposure'):
                exposure.ExposureDBWriter(self.job).serialize(
                    parsers.ExposureModelParser(hc.inputs['exposure']))
Example #33
def classical(job_id, hazard, vulnerability_function, output_containers,
              lrem_steps_per_interval, conditional_loss_poes,
              hazard_montecarlo_p):
    """
    Celery task for the classical risk calculator.

    Instantiates risklib calculators, computes losses for the given
    assets and stores the results to db in a single transaction.

    :param int job_id:
      ID of the currently running job
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'hazard_curve') to a tuple where the first element is an instance of
      :class:`..hazard_getters.HazardCurveGetter`, and the second element is
      the corresponding weight.
    :param dict output_containers: A dictionary mapping hazard
      Output ID to a tuple (a, b, c, d) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used to
      store the computed loss curves; b is a dictionary that maps poe to the
      ID of the :class:`openquake.engine.db.models.LossMap` used to store
      the loss maps; c and d are the IDs of the output containers for the
      mean and quantile loss curves, respectively
    :param int lrem_steps_per_interval:
      Steps per interval used to compute the Loss Ratio Exceedance matrix
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool hazard_montecarlo_p:
      (meaningful only if curve statistics are computed). Whether or not
      the hazard calculation is Monte Carlo based
    """

    asset_outputs = OrderedDict()

    calculator = api.Classical(
        vulnerability_function, lrem_steps_per_interval)

    for hazard_output_id, hazard_data in hazard.items():
        # the second item of the tuple is the weight of the hazard,
        # which is not needed at this point
        hazard_getter, _ = hazard_data

        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids) = (
             output_containers[hazard_output_id])

        with logs.tracing('getting hazard'):
            assets, hazard_curves, missings = hazard_getter()

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs[hazard_output_id] = calculator(hazard_curves)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, loss_ratio_curve in enumerate(
                        asset_outputs[hazard_output_id]):

                    asset = assets[i]

                    # Write Loss Curves
                    general.write_loss_curve(
                        loss_curve_id, asset, loss_ratio_curve)

                    # Then conditional loss maps
                    for poe in conditional_loss_poes:
                        general.write_loss_map_data(
                            loss_map_ids[poe], asset,
                            scientific.conditional_loss_ratio(
                                loss_ratio_curve, poe))

    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with transaction.commit_on_success(using='reslt_writer'):
                # asset_outputs maps each hazard output to a list of
                # curves, one per asset; curve_statistics needs, for a
                # given asset, one curve per hazard output
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        [curves[i] for curves in asset_outputs.values()],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="support")

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings))
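
To make the unpacking above concrete, the two dictionary arguments fit
together roughly as follows; the IDs and the getter instance are
hypothetical stand-ins, not real engine objects:

# Hypothetical inputs for the classical task (illustrative only):
hazard_curve_getter = object()        # stand-in for a HazardCurveGetter
hazard = {
    42: (hazard_curve_getter, 1.0),   # hazard Output ID -> (getter, weight)
}
output_containers = {
    42: (7,                  # LossCurve container ID
         {0.1: 8, 0.02: 9},  # poe -> LossMap container ID
         10,                 # mean LossCurve container ID
         [11, 12]),          # quantile LossCurve container IDs
}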
Example #34
0
def event_based_bcr(job_id, hazard, seed,
                    vulnerability_function, vulnerability_function_retrofitted,
                    output_containers, time_span, tses,
                    loss_curve_resolution, asset_correlation,
                    asset_life_expectancy, interest_rate):
    """
    Celery task for the BCR risk calculator based on the event based
    calculator.

    Instantiates risklib calculators, computes bcr
    and stores results to db in a single transaction.

    :param int job_id:
        ID of the currently running job.
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmf_collection') to a tuple where the first element is a list
      of lists (one for each asset) with the ground motion values used by
      the calculation, and the second element is the corresponding weight.
    :param output_containers: A dictionary mapping hazard Output ID to
      a tuple with only the ID of the
      :class:`openquake.engine.db.models.BCRDistribution` output container
      used to store the computed bcr distribution
    :param float time_span:
        Time Span of the hazard calculation.
    :param float tses:
        Time of the Stochastic Event Set.
    :param int loss_curve_resolution:
        Resolution of the computed loss curves (number of points).
    :param int seed:
        Seed used to generate random values.
    :param float asset_correlation:
        asset correlation (0 uncorrelated, 1 perfectly correlated).
    :param float interest_rate:
        The interest rate used in the Cost Benefit Analysis.
    :param float asset_life_expectancy:
        The life expectancy used for every asset.
    """

    for hazard_output_id, hazard_data in hazard.items():
        hazard_getter, _ = hazard_data
        (bcr_distribution_id,) = output_containers[hazard_output_id]

        # FIXME(lp). We should not pass the exact same seed for
        # different hazard
        calc_original = api.ProbabilisticEventBased(
            vulnerability_function, curve_resolution=loss_curve_resolution,
            time_span=time_span, tses=tses,
            seed=seed, correlation=asset_correlation)

        calc_retrofitted = api.ProbabilisticEventBased(
            vulnerability_function_retrofitted,
            curve_resolution=loss_curve_resolution,
            time_span=time_span, tses=tses,
            seed=seed, correlation=asset_correlation)

        with logs.tracing('getting hazard'):
            assets, gmvs_ruptures, missings = hazard_getter()
            if len(assets):
                ground_motion_values = numpy.array(gmvs_ruptures)[:, 0]
            else:
                # we are relying on the fact that the hazard getters
                # in this task either all return some results or they
                # all return an empty result set
                logs.LOG.info("Exit from task as no asset could be processed")
                base.signal_task_complete(job_id=job_id,
                                          num_items=len(missings))
                return

        with logs.tracing('computing risk'):
            _, original_loss_curves = calc_original(ground_motion_values)
            _, retrofitted_loss_curves = calc_retrofitted(ground_motion_values)

            eal_original = [
                scientific.mean_loss(*original_loss_curves[i].xy)
                for i in range(len(assets))]

            eal_retrofitted = [
                scientific.mean_loss(*retrofitted_loss_curves[i].xy)
                for i in range(len(assets))]

            bcr_results = [
                scientific.bcr(
                    eal_original[i], eal_retrofitted[i],
                    interest_rate, asset_life_expectancy,
                    asset.value, asset.retrofitting_cost)
                for i, asset in enumerate(assets)]

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset in enumerate(assets):
                    general.write_bcr_distribution(
                        bcr_distribution_id, asset,
                        eal_original[i], eal_retrofitted[i], bcr_results[i])

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings))
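
The scientific.bcr call above computes a conventional discounted
benefit-cost ratio. A standalone sketch of that formula (an assumption
about the library internals, not the risklib implementation itself):

from math import exp

def bcr_sketch(eal_original, eal_retrofitted, interest_rate,
               asset_life_expectancy, asset_value, retrofitting_cost):
    # Discount the annual benefit of the retrofit (the reduction in
    # expected annual loss) over the asset's life, then divide by cost.
    benefit = ((eal_original - eal_retrofitted) * asset_value *
               (1 - exp(-interest_rate * asset_life_expectancy)) /
               interest_rate)
    return benefit / retrofitting_cost

# e.g. bcr_sketch(0.05, 0.02, 0.05, 40, 1000.0, 300.0) ~= 1.73;
# a ratio above 1 means the retrofit pays for itself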
Example #35
0
def event_based_bcr(job_id, hazard, seed,
                    vulnerability_function, vulnerability_function_retrofitted,
                    output_containers, time_span, tses,
                    loss_curve_resolution, asset_correlation,
                    asset_life_expectancy, interest_rate):
    """
    Celery task for the BCR risk calculator based on the event based
    calculator.

    Instantiates risklib calculators, computes bcr
    and stores results to db in a single transaction.

    :param int job_id:
        ID of the currently running job.
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmf_collection') to a tuple where the first element is a list
      of lists (one for each asset) with the ground motion values used by
      the calculation, and the second element is the corresponding weight.
    :param output_containers: A dictionary mapping hazard Output ID to
      a tuple with only the ID of the
      :class:`openquake.engine.db.models.BCRDistribution` output container
      used to store the computed bcr distribution
    :param float time_span:
        Time Span of the hazard calculation.
    :param float tses:
        Time of the Stochastic Event Set.
    :param int loss_curve_resolution:
        Resolution of the computed loss curves (number of points).
    :param int seed:
        Seed used to generate random values.
    :param float asset_correlation:
        asset correlation (0 uncorrelated, 1 perfectly correlated).
    :param float interest_rate:
        The interest rate used in the Cost Benefit Analysis.
    :param float asset_life_expectancy:
        The life expectancy used for every asset.
    """

    for hazard_output_id, hazard_data in hazard.items():
        hazard_getter, _ = hazard_data
        (bcr_distribution_id,) = output_containers[hazard_output_id]

        # FIXME(lp). We should not pass the exact same seed for
        # different hazard
        calculator = api.ProbabilisticEventBased(
            vulnerability_function, curve_resolution=loss_curve_resolution,
            time_span=time_span, tses=tses,
            seed=seed, correlation=asset_correlation)

        calculator_retrofitted = api.ProbabilisticEventBased(
            vulnerability_function_retrofitted,
            curve_resolution=loss_curve_resolution,
            time_span=time_span, tses=tses,
            seed=seed, correlation=asset_correlation)

        bcr_calculator = api.BCR(calculator, calculator_retrofitted,
                                 interest_rate, asset_life_expectancy)

        with logs.tracing('getting hazard'):
            assets, ground_motion_fields, missings = hazard_getter()

        with logs.tracing('computing risk over %d assets' % len(assets)):
            asset_outputs = bcr_calculator(assets, ground_motion_fields)

        with logs.tracing('writing results'):
            with transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(asset_outputs):
                    general.write_bcr_distribution(
                        bcr_distribution_id, assets[i], asset_output)

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings))
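
Both BCR variants unpack output_containers with a one-element tuple
pattern; with hypothetical IDs, the mapping looks like this:

# Hypothetical output_containers for the BCR tasks (IDs invented):
output_containers = {42: (15,)}   # Output ID -> (BCRDistribution ID,)
(bcr_distribution_id,) = output_containers[42]
assert bcr_distribution_id == 15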
Example #36
0
def event_based(job_id, hazard,
                seed, vulnerability_function,
                output_containers,
                conditional_loss_poes, insured_losses,
                time_span, tses,
                loss_curve_resolution, asset_correlation,
                hazard_montecarlo_p):
    """
    Celery task for the event based risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmf_collection') to a tuple where the first element is an
      instance of
      :class:`..hazard_getters.GroundMotionValuesGetter`,
      and the second element is the corresponding weight.
    :param seed:
      the seed used to initialize the rng
    :param dict output_containers: a dictionary mapping hazard Output
      ID to a tuple (a, b, c, d, e, f) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used to
      store the computed loss curves; b is the dictionary poe->ID of
      the :class:`openquake.engine.db.models.LossMap` output container used
      to store the computed loss maps; c and d are the IDs of the output
      containers for the mean and quantile loss curves; e is the same as a
      but for insured losses; f is the ID of the
      :class:`openquake.engine.db.models.AggregateLossCurve` output
      container used to store the aggregate loss curve
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool insured_losses: True if insured losses should be computed
    :param time_span: the time span considered
    :param tses: time of the stochastic event set
    :param loss_curve_resolution: the curve resolution, i.e. the
      number of points that define the loss curves
    :param float asset_correlation: a number ranging from 0 to 1
      representing the correlation between the generated loss ratios
    :param bool hazard_montecarlo_p:
      (meaningful only if curve statistics are computed). Whether or not
      the hazard calculation is Monte Carlo based
    """

    asset_outputs = OrderedDict()
    for hazard_output_id, hazard_data in hazard.items():
        hazard_getter, _ = hazard_data

        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids,
         insured_curve_id, aggregate_loss_curve_id) = (
             output_containers[hazard_output_id])

        # FIXME(lp). We should not pass the exact same seed for
        # different hazard
        calculator = api.ProbabilisticEventBased(
            vulnerability_function,
            curve_resolution=loss_curve_resolution,
            time_span=time_span,
            tses=tses,
            seed=seed,
            correlation=asset_correlation)

        if insured_losses:
            calculator = api.InsuredLosses(calculator)

        # if we need to compute the loss maps, we add the proper risk
        # aggregator
        if conditional_loss_poes:
            calculator = api.ConditionalLosses(
                conditional_loss_poes, calculator)

        with logs.tracing('getting input data from db'):
            assets, ground_motion_values, missings = hazard_getter()

        with logs.tracing('computing risk'):
            asset_outputs[hazard_output_id] = calculator(
                assets, ground_motion_values)

        with logs.tracing('writing results'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, asset_output in enumerate(
                        asset_outputs[hazard_output_id]):
                    general.write_loss_curve(
                        loss_curve_id, assets[i], asset_output)

                    if asset_output.conditional_losses:
                        general.write_loss_map(
                            loss_map_ids, assets[i], asset_output)

                    if asset_output.insured_losses:
                        general.write_loss_curve(
                            insured_curve_id, assets[i], asset_output)
                losses = sum(asset_output.losses
                             for asset_output
                             in asset_outputs[hazard_output_id])
                general.update_aggregate_losses(
                    aggregate_loss_curve_id, losses)

    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        [asset_output[i].loss_ratio_curve
                         for asset_output in asset_outputs.values()],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="image")

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings))
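
In this variant the calculator is assembled by wrapping:
api.InsuredLosses and api.ConditionalLosses each decorate the calculator
built so far. A toy sketch of the pattern (the classes here are
stand-ins, not the risklib API):

class ToyCalculator(object):
    """Stand-in for a risklib calculator returning one output per asset."""
    def __call__(self, assets, ground_motion_values):
        return ['output for %s' % asset for asset in assets]

class ToyWrapper(object):
    """Stand-in for a decorating calculator such as api.InsuredLosses."""
    def __init__(self, calculator):
        self.calculator = calculator

    def __call__(self, assets, ground_motion_values):
        outputs = self.calculator(assets, ground_motion_values)
        # a real wrapper would enrich each output before returning it
        return outputs

calculator = ToyWrapper(ToyCalculator())   # composition, as in the task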
Example #37
0
def event_based(job_id, hazard,
                seed, vulnerability_function,
                output_containers,
                conditional_loss_poes, insured_losses,
                time_span, tses,
                loss_curve_resolution, asset_correlation,
                hazard_montecarlo_p):
    """
    Celery task for the event based risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param dict hazard:
      A dictionary mapping IDs of
      :class:`openquake.engine.db.models.Output` (with output_type set
      to 'gmf_collection') to a tuple where the first element is an
      instance of
      :class:`..hazard_getters.GroundMotionValuesGetter`,
      and the second element is the corresponding weight.
    :param seed:
      the seed used to initialize the rng
    :param dict output_containers: a dictionary mapping hazard Output
      ID to a tuple (a, b, c, d, e, f) where a is the ID of the
      :class:`openquake.engine.db.models.LossCurve` output container used to
      store the computed loss curves; b is the dictionary poe->ID of
      the :class:`openquake.engine.db.models.LossMap` output container used
      to store the computed loss maps; c and d are the IDs of the output
      containers for the mean and quantile loss curves; e is the same as a
      but for insured losses; f is the ID of the
      :class:`openquake.engine.db.models.AggregateLossCurve` output
      container used to store the aggregate loss curve
    :param conditional_loss_poes:
      The poes taken into account to compute the loss maps
    :param bool insured_losses: True if insured losses should be computed
    :param time_span: the time span considered
    :param tses: time of the stochastic event set
    :param loss_curve_resolution: the curve resolution, i.e. the
      number of points that define the loss curves
    :param float asset_correlation: a number ranging from 0 to 1
      representing the correlation between the generated loss ratios
    :param bool hazard_montecarlo_p:
      (meaningful only if curve statistics are computed). Whether or not
      the hazard calculation is Monte Carlo based
    """

    loss_ratio_curves = OrderedDict()
    event_loss_table = dict()

    for hazard_output_id, hazard_data in hazard.items():
        hazard_getter, _ = hazard_data

        (loss_curve_id, loss_map_ids,
         mean_loss_curve_id, quantile_loss_curve_ids,
         insured_curve_id, aggregate_loss_curve_id) = (
             output_containers[hazard_output_id])

        # FIXME(lp). We should not pass the exact same seed for
        # different hazard
        calculator = api.ProbabilisticEventBased(
            vulnerability_function,
            curve_resolution=loss_curve_resolution,
            time_span=time_span,
            tses=tses,
            seed=seed,
            correlation=asset_correlation)

        with logs.tracing('getting input data from db'):
            assets, gmvs_ruptures, missings = hazard_getter()

        if len(assets):
            ground_motion_values = numpy.array(gmvs_ruptures)[:, 0]
            rupture_id_matrix = numpy.array(gmvs_ruptures)[:, 1]
        else:
            # we are relying on the fact that the hazard getters in
            # this task either all return some results or they all
            # return an empty result set
            logs.LOG.info("Exit from task as no asset could be processed")
            base.signal_task_complete(
                job_id=job_id,
                event_loss_table=dict(),
                num_items=len(missings))
            return

        with logs.tracing('computing risk'):
            loss_ratio_matrix, loss_ratio_curves[hazard_output_id] = (
                calculator(ground_motion_values))

        with logs.tracing('writing results'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                for i, loss_ratio_curve in enumerate(
                        loss_ratio_curves[hazard_output_id]):
                    asset = assets[i]

                    # loss curves
                    general.write_loss_curve(
                        loss_curve_id, asset, loss_ratio_curve)

                    # loss maps
                    for poe in conditional_loss_poes:
                        general.write_loss_map_data(
                            loss_map_ids[poe], asset,
                            scientific.conditional_loss_ratio(
                                loss_ratio_curve, poe))

                    # insured losses
                    if insured_losses:
                        insured_loss_curve = scientific.event_based(
                            scientific.insured_losses(
                                loss_ratio_matrix[i],
                                asset.value,
                                asset.deductible,
                                asset.ins_limit),
                            tses,
                            time_span,
                            loss_curve_resolution)

                        insured_loss_curve.abscissae = (
                            insured_loss_curve.abscissae / asset.value)
                        general.write_loss_curve(
                            insured_curve_id, asset, insured_loss_curve)

                # update the event loss table of this task
                for i, asset in enumerate(assets):
                    for j, rupture_id in enumerate(rupture_id_matrix[i]):
                        loss = loss_ratio_matrix[i][j] * asset.value
                        event_loss_table[rupture_id] = (
                            event_loss_table.get(rupture_id, 0) + loss)

                # update the aggregate losses
                aggregate_losses = sum(
                    loss_ratio_matrix[i] * asset.value
                    for i, asset in enumerate(assets))
                general.update_aggregate_losses(
                    aggregate_loss_curve_id, aggregate_losses)

    # compute mean and quantile loss curves if multiple hazard
    # realizations are computed
    if len(hazard) > 1 and (mean_loss_curve_id or quantile_loss_curve_ids):
        weights = [data[1] for _, data in hazard.items()]

        with logs.tracing('writing curve statistics'):
            with db.transaction.commit_on_success(using='reslt_writer'):
                # here we are relying on the fact that assets do not
                # change across different logic tree realizations (the
                # hazard grid does not change, so the hazard getters
                # always return the same assets)
                for i, asset in enumerate(assets):
                    general.curve_statistics(
                        asset,
                        # one curve per hazard output for this asset
                        [curves[i] for curves in loss_ratio_curves.values()],
                        weights,
                        mean_loss_curve_id,
                        quantile_loss_curve_ids,
                        hazard_montecarlo_p,
                        assume_equal="image")

    base.signal_task_complete(job_id=job_id,
                              num_items=len(assets) + len(missings),
                              event_loss_table=event_loss_table)
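
The event loss table above is a plain accumulation keyed by rupture ID;
isolated from the task, the pattern is (values hypothetical):

# Hypothetical per-event losses accumulated across assets:
event_loss_table = {}
losses_by_rupture = [('rup-1', 100.0), ('rup-2', 40.0), ('rup-1', 60.0)]
for rupture_id, loss in losses_by_rupture:
    event_loss_table[rupture_id] = event_loss_table.get(rupture_id, 0) + loss
# event_loss_table == {'rup-1': 160.0, 'rup-2': 40.0}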
Example #38
0
    def parse_risk_models(self):
        """
        If any risk model is given in the hazard calculation, the
        computation will be driven by risk data. In this case the
        locations will be extracted from the exposure file (if there
        is one) and the imt (and levels) will be extracted from the
        vulnerability model (if there is one)
        """
        logs.LOG.progress("parsing risk models")

        hc = self.hc
        queryset = self.hc.inputs.filter(
            input_type__in=[vf_type
                            for vf_type, _desc
                            in models.Input.VULNERABILITY_TYPE_CHOICES])
        if queryset.exists():
            hc.intensity_measure_types_and_levels = dict()
            hc.intensity_measure_types = list()

            for input_type in queryset:
                content = StringIO.StringIO(
                    input_type.model_content.raw_content_ascii)
                intensity_measure_types_and_levels = dict(
                    (record['IMT'], record['IML']) for record in
                    parsers.VulnerabilityModelParser(content)
                )

                for imt, levels in \
                        intensity_measure_types_and_levels.items():
                    if (imt in hc.intensity_measure_types_and_levels and
                        (set(hc.intensity_measure_types_and_levels[imt]) -
                         set(levels))):
                        logs.LOG.warning(
                            "The same IMT %s is associated with "
                            "different levels" % imt)
                    else:
                        hc.intensity_measure_types_and_levels[imt] = levels

                hc.intensity_measure_types.extend(
                    intensity_measure_types_and_levels)

            # remove possible duplicates
            if hc.intensity_measure_types is not None:
                hc.intensity_measure_types = list(set(
                    hc.intensity_measure_types))
            hc.save()
            logs.LOG.info("Got IMT and levels "
                          "from vulnerability models: %s - %s" % (
                              hc.intensity_measure_types_and_levels,
                              hc.intensity_measure_types))

        queryset = self.hc.inputs.filter(input_type='fragility')
        if queryset.exists():
            hc.intensity_measure_types_and_levels = dict()
            hc.intensity_measure_types = list()

            parser = iter(parsers.FragilityModelParser(
                StringIO.StringIO(
                    queryset.all()[0].model_content.raw_content_ascii)))

            fragility_format, _limit_states = parser.next()

            if (fragility_format == "continuous" and
                    hc.calculation_mode != "scenario"):
                raise NotImplementedError(
                    "Getting IMT and levels from "
                    "a continuous fragility model is not yet supported")

            hc.intensity_measure_types_and_levels = dict(
                (iml['IMT'], iml['imls'])
                for _taxonomy, iml, _params, _no_damage_limit in parser)
            hc.intensity_measure_types.extend(
                hc.intensity_measure_types_and_levels)
            hc.save()

        queryset = self.hc.inputs.filter(input_type='exposure')
        if queryset.exists():
            exposure_model_input = queryset.all()[0]
            content = StringIO.StringIO(
                exposure_model_input.model_content.raw_content_ascii)
            with logs.tracing('storing exposure'):
                exposure.ExposureDBWriter(
                    exposure_model_input).serialize(
                        parsers.ExposureModelParser(content))
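
The IMT merge logic shared by both versions of parse_risk_models keeps
the first levels seen for an IMT and only warns on a conflict; extracted
as a standalone sketch (not engine code):

import logging

def merge_imt_levels(accumulated, new_mapping):
    """Merge {imt: levels} into `accumulated` in place, warning when an
    IMT reappears with levels missing from the new mapping."""
    for imt, levels in new_mapping.items():
        if imt in accumulated and set(accumulated[imt]) - set(levels):
            logging.warning(
                "The same IMT %s is associated with different levels", imt)
        else:
            accumulated[imt] = levels

acc = {'PGA': [0.1, 0.2]}
merge_imt_levels(acc, {'PGA': [0.1], 'SA(0.1)': [0.05]})  # warns about PGA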