Example #1
def compute_gmfs(job_id, sites, rupture_id, gmfcoll_id, realizations):
    """
    Compute ground motion fields and store them in the db.

    :param job_id:
        ID of the currently running job.
    :param sites:
        The subset of the full SiteCollection scanned by this task
    :param rupture_id:
        ID of the parsed rupture model from which we will generate
        ground motion fields.
    :param gmfcoll_id:
        the id of a :class:`openquake.engine.db.models.Gmf` record
    :param realizations:
        Number of realizations to create.
    """

    hc = models.HazardCalculation.objects.get(oqjob=job_id)
    rupture_mdl = source.nrml_to_hazardlib(
        models.ParsedRupture.objects.get(id=rupture_id).nrml,
        hc.rupture_mesh_spacing, None, None)
    imts = [haz_general.imt_to_hazardlib(x)
            for x in hc.intensity_measure_types]
    gsim = AVAILABLE_GSIMS[hc.gsim]()  # instantiate the GSIM class
    correlation_model = haz_general.get_correl_model(hc)

    with EnginePerformanceMonitor('computing gmfs', job_id, compute_gmfs):
        gmf = ground_motion_fields(
            rupture_mdl, sites, imts, gsim,
            hc.truncation_level, realizations=realizations,
            correlation_model=correlation_model)
    with EnginePerformanceMonitor('saving gmfs', job_id, compute_gmfs):
        save_gmf(gmfcoll_id, gmf, sites)
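
The `EnginePerformanceMonitor` context manager used here times each phase ('computing gmfs', 'saving gmfs') separately. As a rough illustration of that pattern only, not the engine's actual implementation, a minimal stand-in timer could look like this; `performance_monitor` and its printed report are hypothetical:

import time
from contextlib import contextmanager

@contextmanager
def performance_monitor(operation, job_id):
    # Hypothetical stand-in for EnginePerformanceMonitor: time the block
    # and report the elapsed wall-clock seconds for the named operation.
    start = time.time()
    try:
        yield
    finally:
        print('job %s: %s took %.3fs' % (job_id, operation, time.time() - start))

# Usage mirrors the example above: one monitor per phase.
with performance_monitor('computing gmfs', job_id=1):
    sum(range(1000000))  # placeholder for the actual GMF computation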
Example #2
def compute_gmfs(job_id, sites, rupture_id, output_id, realizations):
    """
    Compute ground motion fields and store them in the db.

    :param job_id:
        ID of the currently running job.
    :param sites:
        The subset of the full SiteCollection scanned by this task
    :param rupture_id:
        ID of the parsed rupture model from which we will generate
        ground motion fields.
    :param output_id:
        ID of the output record.
    :param realizations:
        Number of realizations to create.
    """

    hc = models.HazardCalculation.objects.get(oqjob=job_id)
    rupture_mdl = source.nrml_to_hazardlib(
        models.ParsedRupture.objects.get(id=rupture_id).nrml,
        hc.rupture_mesh_spacing, None, None)
    imts = [haz_general.imt_to_hazardlib(x)
            for x in hc.intensity_measure_types]
    gsim = AVAILABLE_GSIMS[hc.gsim]()  # instantiate the GSIM class
    correlation_model = haz_general.get_correl_model(hc)
    gmf = ground_motion_fields(
        rupture_mdl, sites, imts, gsim,
        hc.truncation_level, realizations=realizations,
        correlation_model=correlation_model)
    save_gmf(output_id, gmf, sites.mesh)
Example #3
def compute_gmfs(job_id, rupture_ids, output_id, task_no, realizations):
    """
    Compute ground motion fields and store them in the db.

    :param job_id:
        ID of the currently running job.
    :param rupture_ids:
        List of IDs of parsed rupture models from which we will generate
        ground motion fields.
    :param output_id:
        ID of the output record.
    :param task_no:
        The sequence number of this task within the entire calculation;
        the results computed by this task are grouped under this number.
    :param realizations:
        Number of realizations which are going to be created.
    """

    hc = models.HazardCalculation.objects.get(oqjob=job_id)
    rupture_mdl = source.nrml_to_hazardlib(
        models.ParsedRupture.objects.get(id=rupture_ids[0]).nrml,
        hc.rupture_mesh_spacing, None, None)
    imts = [haz_general.imt_to_hazardlib(x)
            for x in hc.intensity_measure_types]
    gsim = AVAILABLE_GSIMS[hc.gsim]
    correlation_model = haz_general.get_correl_model(hc)
    gmf = ground_motion_fields(
        rupture_mdl, hc.site_collection, imts, gsim(),
        hc.truncation_level, realizations=realizations,
        correlation_model=correlation_model)

    save_gmf(output_id, gmf, hc.site_collection.mesh, task_no)
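
Note that `AVAILABLE_GSIMS` maps GSIM names to classes, not instances: this example looks up the class (`gsim = AVAILABLE_GSIMS[hc.gsim]`) and defers instantiation to the call site (`gsim()`). A toy sketch of that registry pattern, with purely illustrative names:

class ToyGsim(object):
    # Illustrative placeholder for a ground shaking intensity model class.
    pass

# The registry maps names to classes; instantiation is deferred.
TOY_GSIMS = {'ToyGsim': ToyGsim}

gsim_cls = TOY_GSIMS['ToyGsim']  # look up the class by name
gsim = gsim_cls()                # instantiate only when needed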
Example #4
def _update_curves(hc, matrices, lt_rlz, src_ids):
    """
    Helper function for updating source, hazard curve, and realization progress
    records in the database.

    This is intended to be used by :func:`compute_hazard_curves`.

    :param hc:
        :class:`openquake.engine.db.models.HazardCalculation` instance.
    :param matrices:
        A dictionary mapping hazardlib IMT objects to 2D arrays of hazard
        curve probabilities (first dimension: sites, second: IMLs) computed
        by this task.
    :param lt_rlz:
        :class:`openquake.engine.db.models.LtRealization` record for the
        current realization.
    :param src_ids:
        List of source IDs considered for this calculation task.
    """
    with logs.tracing('_update_curves for all IMTs'):
        for imt in hc.intensity_measure_types_and_levels.keys():
            with transaction.commit_on_success():
                logs.LOG.debug('> updating hazard for IMT=%s' % imt)
                hazardlib_imt = haz_general.imt_to_hazardlib(imt)
                query = """
                SELECT * FROM htemp.hazard_curve_progress
                WHERE lt_realization_id = %s
                AND imt = %s
                FOR UPDATE"""
                [hc_progress] = models.HazardCurveProgress.objects.raw(
                    query, [lt_rlz.id, imt])

                hc_progress.result_matrix = update_result_matrix(
                    hc_progress.result_matrix, matrices[hazardlib_imt])
                hc_progress.save()

                logs.LOG.debug('< done updating hazard for IMT=%s' % imt)

        with transaction.commit_on_success():
            # Check whether any records in the source progress model with
            # parsed_source_id in src_ids are already marked as complete;
            # roll back and abort if there is at least one.
            src_prog = models.SourceProgress.objects.filter(
                lt_realization=lt_rlz, parsed_source__in=src_ids)

            if any(x.is_complete for x in src_prog):
                msg = (
                    'One or more `source_progress` records were marked as '
                    'complete. This was unexpected and probably means that the'
                    ' calculation workload was not distributed properly.'
                )
                logs.LOG.critical(msg)
                transaction.rollback()
                raise RuntimeError(msg)

            # Mark source_progress records as complete
            src_prog.update(is_complete=True)

            # Update realization progress and mark the realization as
            # complete if it is done.
            haz_general.update_realization(lt_rlz.id, len(src_ids))
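
The raw `SELECT ... FOR UPDATE` query above locks the matching `hazard_curve_progress` row for the duration of the transaction, so concurrent tasks cannot clobber each other's updates to `result_matrix`. A generic sketch of that read-modify-write pattern, assuming a plain psycopg2-style connection rather than the Django ORM (the helper name and connection handling are assumptions):

def locked_update(conn, lt_realization_id, imt, new_matrix):
    # Lock the row until commit, then update it; other transactions that
    # issue the same SELECT ... FOR UPDATE will block until we are done.
    with conn.cursor() as cur:
        cur.execute(
            "SELECT id FROM htemp.hazard_curve_progress"
            " WHERE lt_realization_id = %s AND imt = %s FOR UPDATE",
            (lt_realization_id, imt))
        (row_id,) = cur.fetchone()
        cur.execute(
            "UPDATE htemp.hazard_curve_progress"
            " SET result_matrix = %s WHERE id = %s",
            (new_matrix, row_id))
    conn.commit()  # committing releases the row lock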
Example #5
    def __init__(self, imt, site_collection, sites_assets, truncation_level,
                 gsims, correlation_model):
        """
        :param str imt:
            the intensity measure type considered
        :param site_collection:
            a :class:`openquake.engine.db.models.SiteCollection` instance
            holding all the sites of the hazard calculation from which the
            ruptures have been computed
        :param sites_assets:
            an iterator over tuples of the form (site_id, assets), where
            site_id is the id of a
            :class:`openquake.engine.db.models.HazardSite` object and
            assets is a list of asset objects associated with that site
        :param float truncation_level:
            the truncation level of the normal distribution used to generate
            random numbers. If ``None``, a non-truncated normal is used
        :param gsims:
            a dictionary of the gsims considered keyed by the tectonic
            region type
        :param correlation_model:
            Instance of correlation model object. See
            :mod:`openquake.hazardlib.correlation`. Can be ``None``, in which
            case non-correlated ground motion fields are calculated.
            Correlation model is not used if ``truncation_level`` is zero.
        """

        self.imt = general.imt_to_hazardlib(imt)
        self.site_collection = site_collection
        self.sites_assets = sites_assets
        self.truncation_level = truncation_level
        self.sites = models.SiteCollection([
            self.site_collection.get_by_id(site_id)
            for site_id, _assets in self.sites_assets
        ])

        all_site_ids = [s.id for s in self.site_collection]
        self.sites_dict = dict(
            (all_site_id, i) for i, all_site_id in enumerate(all_site_ids))

        self.generate_epsilons = truncation_level != 0
        self.correlation_matrix = None
        if self.generate_epsilons:
            if truncation_level is None:
                self.distribution = scipy.stats.norm()
            elif truncation_level > 0:
                self.distribution = scipy.stats.truncnorm(
                    -truncation_level, truncation_level)

            if correlation_model is not None:
                c = correlation_model.get_lower_triangle_correlation_matrix(
                    site_collection, self.imt)
                self.correlation_matrix = c

        self.gsims = gsims
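
The branching on `truncation_level` above has three cases: ``None`` gives an unbounded normal distribution, a positive value gives a normal truncated at plus or minus that many standard deviations, and zero disables epsilon generation entirely. A runnable sketch of the same selection (the helper name is an assumption):

import scipy.stats

def make_distribution(truncation_level):
    # None -> unbounded normal; positive -> truncated normal; 0 -> no sampling.
    if truncation_level is None:
        return scipy.stats.norm()
    elif truncation_level > 0:
        return scipy.stats.truncnorm(-truncation_level, truncation_level)
    return None  # truncation_level == 0: epsilons are not generated

dist = make_distribution(3.0)
print(dist.rvs(size=5))  # five samples, all within [-3, 3]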
Example #6
    def __init__(self, imt, site_collection, sites_assets,
                 truncation_level, gsims, correlation_model):
        """
        :param str imt:
            the intensity measure type considered
        :param site_collection:
            a :class:`openquake.engine.db.models.SiteCollection` instance
            holding all the sites of the hazard calculation from which the
            ruptures have been computed
        :param sites_assets:
            an iterator over tuples of the form (site_id, assets), where
            site_id is the id of a
            :class:`openquake.engine.db.models.HazardSite` object and
            assets is a list of asset objects associated with that site
        :param float truncation_level:
            the truncation level of the normal distribution used to generate
            random numbers. If ``None``, a non-truncated normal is used
        :param gsims:
            a dictionary of the gsims considered keyed by the tectonic
            region type
        :param correlation_model:
            Instance of correlation model object. See
            :mod:`openquake.hazardlib.correlation`. Can be ``None``, in which
            case non-correlated ground motion fields are calculated.
            Correlation model is not used if ``truncation_level`` is zero.
        """

        self.imt = general.imt_to_hazardlib(imt)
        self.site_collection = site_collection
        self.sites_assets = sites_assets
        self.truncation_level = truncation_level
        self.sites = models.SiteCollection(
            [self.site_collection.get_by_id(site_id)
             for site_id, _assets in self.sites_assets])

        all_site_ids = [s.id for s in self.site_collection]
        self.sites_dict = dict((all_site_id, i)
                               for i, all_site_id in enumerate(all_site_ids))

        self.generate_epsilons = truncation_level != 0
        self.correlation_matrix = None
        if self.generate_epsilons:
            if truncation_level is None:
                self.distribution = scipy.stats.norm()
            elif truncation_level > 0:
                self.distribution = scipy.stats.truncnorm(
                    -truncation_level, truncation_level)

            if correlation_model is not None:
                c = correlation_model.get_lower_triangle_correlation_matrix(
                    site_collection, self.imt)
                self.correlation_matrix = c

        self.gsims = gsims
Example #7
def compute_gmf(job_id, params, imt, gsims, ses, site_coll, rupture_ids, rupture_seeds):
    """
    Compute and save the GMFs for all the ruptures in a SES.
    """
    imt = haz_general.imt_to_hazardlib(imt)
    with EnginePerformanceMonitor("reading ruptures", job_id, compute_gmf):
        ruptures = list(models.SESRupture.objects.filter(pk__in=rupture_ids))
    with EnginePerformanceMonitor("computing gmfs", job_id, compute_gmf):
        gmvs_per_site, ruptures_per_site = _compute_gmf(params, imt, gsims, site_coll, ruptures, rupture_seeds)

    with EnginePerformanceMonitor("saving gmfs", job_id, compute_gmf):
        _save_gmfs(ses, imt, gmvs_per_site, ruptures_per_site, site_coll)
Example #8
def compute_gmf(job_id, params, imt, gsims, ses, site_coll,
                rupture_ids, rupture_seeds):
    """
    Compute and save the GMFs for all the ruptures in a SES.
    """
    imt = haz_general.imt_to_hazardlib(imt)
    with EnginePerformanceMonitor(
            'reading ruptures', job_id, compute_gmf):
        ruptures = list(models.SESRupture.objects.filter(pk__in=rupture_ids))
    with EnginePerformanceMonitor(
            'computing gmfs', job_id, compute_gmf):
        gmvs_per_site, ruptures_per_site = _compute_gmf(
            params, imt, gsims, site_coll, ruptures, rupture_seeds)

    with EnginePerformanceMonitor('saving gmfs', job_id, compute_gmf):
        _save_gmfs(ses, imt, gmvs_per_site, ruptures_per_site, site_coll)
Example #9
def compute_gmf_cache(hc, gsims, ruptures, rupture_ids):
    """
    Compute a ground motion field value for each rupture, for all the
    points affected by that rupture, for all IMTs.
    """
    imts = [haz_general.imt_to_hazardlib(x)
            for x in hc.intensity_measure_types]
    correl_model = None
    if hc.ground_motion_correlation_model is not None:
        correl_model = haz_general.get_correl_model(hc)

    n_points = len(hc.site_collection)

    # initialize gmf_cache, a dict imt -> {gmvs, rupture_ids}
    gmf_cache = dict((imt, dict(gmvs=numpy.empty((n_points, 0)),
                                rupture_ids=[]))
                     for imt in imts)

    for rupture, rupture_id in zip(ruptures, rupture_ids):

        # Compute and save ground motion fields
        gmf_calc_kwargs = {
            'rupture': rupture,
            'sites': hc.site_collection,
            'imts': imts,
            'gsim': gsims[rupture.tectonic_region_type],
            'truncation_level': hc.truncation_level,
            'realizations': DEFAULT_GMF_REALIZATIONS,
            'correlation_model': correl_model,
            'rupture_site_filter': filters.rupture_site_distance_filter(
                hc.maximum_distance),
        }
        gmf_dict = gmf.ground_motion_fields(**gmf_calc_kwargs)

        # update the gmf cache:
        for imt_key, v in gmf_dict.iteritems():
            gmf_cache[imt_key]['gmvs'] = numpy.append(
                gmf_cache[imt_key]['gmvs'], v, axis=1)
            gmf_cache[imt_key]['rupture_ids'].append(rupture_id)

    return gmf_cache
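
Each `gmvs` entry in the cache starts as an empty array of shape `(n_points, 0)` so that `numpy.append(..., axis=1)` can grow it by one column per rupture. A self-contained illustration of that accumulation:

import numpy

n_points = 4
gmvs = numpy.empty((n_points, 0))  # no ruptures accumulated yet
for rupture_no in range(3):
    # One column of ground motion values per rupture (toy data here).
    column = numpy.full((n_points, 1), float(rupture_no))
    gmvs = numpy.append(gmvs, column, axis=1)
print(gmvs.shape)  # (4, 3): one row per point, one column per rupture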
Example #10
def _update_curves(hc, matrices, lt_rlz):
    """
    Helper function for updating source, hazard curve, and realization progress
    records in the database.

    This is intended to be used by :func:`compute_hazard_curves`.

    :param hc:
        :class:`openquake.engine.db.models.HazardCalculation` instance.
    :param matrices:
        A dictionary mapping hazardlib IMT objects to 2D arrays of hazard
        curve probabilities (first dimension: sites, second: IMLs) computed
        by this task.
    :param lt_rlz:
        :class:`openquake.engine.db.models.LtRealization` record for the
        current realization.
    """
    with logs.tracing('_update_curves for all IMTs'):
        for imt in hc.intensity_measure_types_and_levels.keys():
            hazardlib_imt = haz_general.imt_to_hazardlib(imt)
            matrix = matrices[hazardlib_imt]
            if (matrix == 0.0).all():
                # The matrix for this IMT is all zeros; there's no reason to
                # update `hazard_curve_progress` records.
                logs.LOG.debug('* No hazard contribution for IMT=%s' % imt)
                continue
            else:
                # There is some contribution to the hazard here; we need to
                # update.
                with transaction.commit_on_success():
                    logs.LOG.debug('> updating hazard for IMT=%s' % imt)
                    query = """
                    SELECT * FROM htemp.hazard_curve_progress
                    WHERE lt_realization_id = %s
                    AND imt = %s
                    FOR UPDATE"""
                    [hc_progress] = models.HazardCurveProgress.objects.raw(
                        query, [lt_rlz.id, imt])

                    hc_progress.result_matrix = update_result_matrix(
                        hc_progress.result_matrix, matrix)
                    hc_progress.save()

                    logs.LOG.debug('< done updating hazard for IMT=%s' % imt)
Example #11
def ses_and_gmfs(job_id, src_ids, lt_rlz_id, task_seed, result_grp_ordinal):
    """
    Celery task for the stochastic event set calculator.

    Samples logic trees and calls the stochastic event set calculator.

    Once stochastic event sets are calculated, results will be saved to the
    database. See :class:`openquake.engine.db.models.SESCollection`.

    Optionally (specified in the job configuration using the
    `ground_motion_fields` parameter), GMFs can be computed from each rupture
    in each stochastic event set. GMFs are also saved to the database.

    Once all of this work is complete, a signal will be sent via AMQP to let
    the control node know that the work is complete. (If there is any work
    left to be dispatched, this signal will indicate to the control node that
    more work can be enqueued.)

    :param int job_id:
        ID of the currently running job.
    :param src_ids:
        List of ids of parsed source models from which we will generate
        stochastic event sets/ruptures.
    :param lt_rlz_id:
        Id of logic tree realization model to calculate for.
    :param int task_seed:
        Value for seeding numpy/scipy in the computation of stochastic event
        sets and ground motion fields.
    :param int result_grp_ordinal:
        The result group in which the calculation results will be placed.
        This ID basically corresponds to the sequence number of the task,
        in the context of the entire calculation.
    """
    logs.LOG.debug(('> starting `stochastic_event_sets` task: job_id=%s, '
                    'lt_realization_id=%s') % (job_id, lt_rlz_id))
    numpy.random.seed(task_seed)

    hc = models.HazardCalculation.objects.get(oqjob=job_id)

    cmplt_lt_ses = None
    if hc.complete_logic_tree_ses:
        cmplt_lt_ses = models.SES.objects.get(
            ses_collection__output__oq_job=job_id,
            complete_logic_tree_ses=True)

    if hc.ground_motion_fields:
        # For ground motion field calculation, we need the points of interest
        # for the calculation.
        points_to_compute = hc.points_to_compute()

        imts = [haz_general.imt_to_hazardlib(x)
                for x in hc.intensity_measure_types]

        correl_model = None
        if hc.ground_motion_correlation_model is not None:
            correl_model = haz_general.get_correl_model(hc)

    lt_rlz = models.LtRealization.objects.get(id=lt_rlz_id)
    ltp = logictree.LogicTreeProcessor(hc.id)

    apply_uncertainties = ltp.parse_source_model_logictree_path(
        lt_rlz.sm_lt_path)
    gsims = ltp.parse_gmpe_logictree_path(lt_rlz.gsim_lt_path)

    sources = list(haz_general.gen_sources(
        src_ids, apply_uncertainties, hc.rupture_mesh_spacing,
        hc.width_of_mfd_bin, hc.area_source_discretization))

    # Compute stochastic event sets
    # For each rupture generated, we can optionally calculate a GMF
    for ses_rlz_n in xrange(1, hc.ses_per_logic_tree_path + 1):
        logs.LOG.debug('> computing stochastic event set %s of %s'
                       % (ses_rlz_n, hc.ses_per_logic_tree_path))

        # This is the container for all ruptures for this stochastic event set
        # (specified by `ordinal` and the logic tree realization).
        # NOTE: Many tasks can contribute ruptures to this SES.
        ses = models.SES.objects.get(
            ses_collection__lt_realization=lt_rlz, ordinal=ses_rlz_n)

        sources_sites = ((src, hc.site_collection) for src in sources)
        ssd_filter = filters.source_site_distance_filter(hc.maximum_distance)
        # Get the filtered sources, ignore the site collection:
        filtered_sources = (src for src, _ in ssd_filter(sources_sites))
        # Calculate stochastic event sets:
        logs.LOG.debug('> computing stochastic event sets')
        if hc.ground_motion_fields:
            gmf_cache = _create_gmf_cache(len(points_to_compute), imts)

            logs.LOG.debug('> computing also ground motion fields')
            # This will be the "container" for all computed ground motion field
            # results for this stochastic event set.
            gmf_set = models.GmfSet.objects.get(
                gmf_collection__lt_realization=lt_rlz, ses_ordinal=ses_rlz_n)

        ses_poissonian = stochastic.stochastic_event_set_poissonian(
            filtered_sources, hc.investigation_time)

        logs.LOG.debug('> looping over ruptures')
        rupture_ordinal = 0
        for rupture in ses_poissonian:
            rupture_ordinal += 1

            # Prepare and save SES ruptures to the db:
            logs.LOG.debug('> saving SES rupture to DB')
            _save_ses_rupture(
                ses, rupture, cmplt_lt_ses, result_grp_ordinal,
                rupture_ordinal)
            logs.LOG.debug('> done saving SES rupture to DB')

            # Compute ground motion fields (if requested)
            logs.LOG.debug('compute ground motion fields?  %s'
                           % hc.ground_motion_fields)
            if hc.ground_motion_fields:
                # Compute and save ground motion fields

                gmf_calc_kwargs = {
                    'rupture': rupture,
                    'sites': hc.site_collection,
                    'imts': imts,
                    'gsim': gsims[rupture.tectonic_region_type],
                    'truncation_level': hc.truncation_level,
                    'realizations': DEFAULT_GMF_REALIZATIONS,
                    'correlation_model': correl_model,
                    'rupture_site_filter':
                        filters.rupture_site_distance_filter(
                            hc.maximum_distance),
                }
                logs.LOG.debug('> computing ground motion fields')
                gmf_dict = gmf_calc.ground_motion_fields(**gmf_calc_kwargs)
                logs.LOG.debug('< done computing ground motion fields')

                # update the gmf cache:
                for k, v in gmf_dict.iteritems():
                    gmf_cache[k] = numpy.append(
                        gmf_cache[k], v, axis=1)

        logs.LOG.debug('< Done looping over ruptures')
        logs.LOG.debug('%s ruptures computed for SES realization %s of %s'
                       % (rupture_ordinal, ses_rlz_n,
                          hc.ses_per_logic_tree_path))
        logs.LOG.debug('< done computing stochastic event set %s of %s'
                       % (ses_rlz_n, hc.ses_per_logic_tree_path))

        if hc.ground_motion_fields:
            # save the GMFs to the DB
            logs.LOG.debug('> saving GMF results to DB')
            _save_gmfs(
                gmf_set, gmf_cache, points_to_compute, result_grp_ordinal)
            logs.LOG.debug('< done saving GMF results to DB')

    logs.LOG.debug('< task complete, signalling completion')
    base.signal_task_complete(job_id=job_id, num_items=len(src_ids))
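
Seeding numpy once per task with `numpy.random.seed(task_seed)` makes the stochastic event sets reproducible: rerunning a task with the same seed yields the same random draws regardless of scheduling order. A minimal demonstration of the idea (the function is illustrative):

import numpy

def draw(task_seed, n=3):
    # Each task seeds its own RNG state, so reruns give identical draws.
    numpy.random.seed(task_seed)
    return numpy.random.random(n)

assert (draw(42) == draw(42)).all()  # same seed, same samples
print(draw(43))                      # a different seed gives different samples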
Example #12
def compute_disagg(job_id, sites, lt_rlz_id, ltp):
    """
    Calculate disaggregation histograms and save the results to the database.

    Here is the basic calculation workflow:

    1. Get all sources
    2. Get IMTs
    3. Get the hazard curve for each point, IMT, and realization
    4. For each `poes_disagg`, interpolate the IML for each curve.
    5. Get GSIMs, TOM (Temporal Occurrence Model), and truncation level.
    6. Get histogram bin edges.
    7. Prepare calculation args.
    8. Call the hazardlib calculator
       (see :func:`openquake.hazardlib.calc.disagg.disaggregation`
       for more info).

    :param int job_id:
        ID of the currently running :class:`openquake.engine.db.models.OqJob`
    :param list sites:
        `list` of :class:`openquake.hazardlib.site.Site` objects, which
        indicate the locations (and associated soil parameters) for which we
        need to compute disaggregation histograms.
    :param int lt_rlz_id:
        ID of the :class:`openquake.engine.db.models.LtRealization` for which
        we want to compute disaggregation histograms. This realization will
        determine which hazard curve results to use as a basis for the
        calculation.
    :param ltp:
        a :class:`openquake.engine.input.LogicTreeProcessor` instance
    """
    # Silencing 'Too many local variables'
    # pylint: disable=R0914
    logs.LOG.debug(
        '> computing disaggregation for %(np)s sites for realization %(rlz)s'
        % dict(np=len(sites), rlz=lt_rlz_id))

    job = models.OqJob.objects.get(id=job_id)
    hc = job.hazard_calculation
    lt_rlz = models.LtRealization.objects.get(id=lt_rlz_id)
    apply_uncertainties = ltp.parse_source_model_logictree_path(
        lt_rlz.sm_lt_path)
    gsims = ltp.parse_gmpe_logictree_path(lt_rlz.gsim_lt_path)

    src_ids = models.SourceProgress.objects.filter(lt_realization=lt_rlz)\
        .order_by('id').values_list('parsed_source_id', flat=True)
    sources = [apply_uncertainties(s.nrml)
               for s in models.ParsedSource.objects.filter(pk__in=src_ids)]

    # Make filters for distance to source and distance to rupture:
    # a better approach would be to filter the sources on distance
    # before, see the comment in the classical calculator
    src_site_filter = openquake.hazardlib.calc.filters.\
        source_site_distance_filter(hc.maximum_distance)
    rup_site_filter = openquake.hazardlib.calc.filters.\
        rupture_site_distance_filter(hc.maximum_distance)

    for imt, imls in hc.intensity_measure_types_and_levels.iteritems():
        hazardlib_imt = haz_general.imt_to_hazardlib(imt)
        hc_im_type, sa_period, sa_damping = models.parse_imt(imt)

        imls = numpy.array(imls[::-1])

        # loop over sites
        for site in sites:
            # get curve for this point/IMT/realization
            [curve] = models.HazardCurveData.objects.filter(
                location=site.location.wkt2d,
                hazard_curve__lt_realization=lt_rlz_id,
                hazard_curve__imt=hc_im_type,
                hazard_curve__sa_period=sa_period,
                hazard_curve__sa_damping=sa_damping,
            )

            # If the hazard curve is all zeros, don't even do the
            # disagg calculation.
            if all(x == 0.0 for x in curve.poes):
                logs.LOG.debug(
                    '* hazard curve contained all 0 probability values; '
                    'skipping')
                continue

            for poe in hc.poes_disagg:
                iml = numpy.interp(poe, curve.poes[::-1], imls)
                calc_kwargs = {
                    'sources': sources,
                    'site': site,
                    'imt': hazardlib_imt,
                    'iml': iml,
                    'gsims': gsims,
                    'time_span': hc.investigation_time,
                    'truncation_level': hc.truncation_level,
                    'n_epsilons': hc.num_epsilon_bins,
                    'mag_bin_width': hc.mag_bin_width,
                    'dist_bin_width': hc.distance_bin_width,
                    'coord_bin_width': hc.coordinate_bin_width,
                    'source_site_filter': src_site_filter,
                    'rupture_site_filter': rup_site_filter,
                }
                with EnginePerformanceMonitor(
                        'computing disaggregation', job_id, disagg_task):
                    bin_edges, diss_matrix = openquake.hazardlib.calc.\
                        disagg.disaggregation_poissonian(**calc_kwargs)
                    if not bin_edges:  # no ruptures generated
                        continue

                with EnginePerformanceMonitor(
                        'saving disaggregation', job_id, disagg_task):
                    _save_disagg_matrix(
                        job, site, bin_edges, diss_matrix, lt_rlz,
                        hc.investigation_time, hc_im_type, iml, poe, sa_period,
                        sa_damping
                    )

    with transaction.commit_on_success():
        # Update realization progress and mark the realization as
        # complete if it is done.
        haz_general.update_realization(lt_rlz_id, len(sites))

    logs.LOG.debug('< done computing disaggregation')
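
`numpy.interp` requires its x-coordinates to be increasing, while hazard curve PoEs decrease as the intensity level grows; that is why both `curve.poes` and `imls` are reversed before interpolating the IML at a target PoE. A small worked example with toy numbers:

import numpy

imls = numpy.array([0.1, 0.2, 0.4, 0.8])   # increasing intensity levels
poes = numpy.array([0.9, 0.5, 0.2, 0.05])  # PoE decreases with intensity

# Reverse both arrays so the x-array (PoEs) is increasing, as interp needs.
iml = numpy.interp(0.35, poes[::-1], imls[::-1])
print(iml)  # 0.3, halfway between the IMLs for PoE 0.5 and PoE 0.2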
Example #13
def compute_hazard_curves(job_id, src_ids, lt_rlz_id):
    """
    Celery task for hazard curve calculator.

    Samples logic trees, gathers site parameters, and calls the hazard curve
    calculator.

    Once hazard curve data is computed, result progress is updated (within a
    transaction, to prevent race conditions) in the
    `htemp.hazard_curve_progress` table.

    Once all of this work is complete, a signal will be sent via AMQP to let
    the control node know that the work is complete. (If there is any work left
    to be dispatched, this signal will indicate to the control node that more
    work can be enqueued.)

    :param int job_id:
        ID of the currently running job.
    :param src_ids:
        List of ids of parsed source models to take into account.
    :param lt_rlz_id:
        Id of logic tree realization model to calculate for.
    """
    hc = models.HazardCalculation.objects.get(oqjob=job_id)

    lt_rlz = models.LtRealization.objects.get(id=lt_rlz_id)
    ltp = logictree.LogicTreeProcessor(hc.id)

    apply_uncertainties = ltp.parse_source_model_logictree_path(
        lt_rlz.sm_lt_path)
    gsims = ltp.parse_gmpe_logictree_path(lt_rlz.gsim_lt_path)

    sources = haz_general.gen_sources(
        src_ids, apply_uncertainties, hc.rupture_mesh_spacing,
        hc.width_of_mfd_bin, hc.area_source_discretization)

    imts = haz_general.im_dict_to_hazardlib(
        hc.intensity_measure_types_and_levels)

    # Prepare args for the calculator.
    calc_kwargs = {'gsims': gsims,
                   'truncation_level': hc.truncation_level,
                   'time_span': hc.investigation_time,
                   'sources': sources,
                   'imts': imts,
                   'sites': hc.site_collection}

    if hc.maximum_distance:
        dist = hc.maximum_distance
        calc_kwargs['source_site_filter'] = (
            openquake.hazardlib.calc.filters.source_site_distance_filter(dist))
        calc_kwargs['rupture_site_filter'] = (
            openquake.hazardlib.calc.filters.rupture_site_distance_filter(
                dist))

    # mapping "imt" to 2d array of hazard curves: first dimension -- sites,
    # second -- IMLs
    logs.LOG.debug('> computing hazard matrices')
    matrices = openquake.hazardlib.calc.hazard_curve.hazard_curves_poissonian(
        **calc_kwargs)
    logs.LOG.debug('< done computing hazard matrices')

    logs.LOG.debug('> starting transaction')
    with transaction.commit_on_success():
        logs.LOG.debug('looping over IMTs')

        for imt in hc.intensity_measure_types_and_levels.keys():
            logs.LOG.debug('> updating hazard for IMT=%s' % imt)
            hazardlib_imt = haz_general.imt_to_hazardlib(imt)
            query = """
            SELECT * FROM htemp.hazard_curve_progress
            WHERE lt_realization_id = %s
            AND imt = %s
            FOR UPDATE"""
            [hc_progress] = models.HazardCurveProgress.objects.raw(
                query, [lt_rlz.id, imt])

            hc_progress.result_matrix = update_result_matrix(
                hc_progress.result_matrix, matrices[hazardlib_imt])
            hc_progress.save()

            logs.LOG.debug('< done updating hazard for IMT=%s' % imt)

        # Before the transaction completes:

        # Check whether any records in the source progress model with
        # parsed_source_id in src_ids are already marked as complete;
        # roll back and abort if there is at least one.
        src_prog = models.SourceProgress.objects.filter(
            lt_realization=lt_rlz, parsed_source__in=src_ids)

        if any(x.is_complete for x in src_prog):
            msg = (
                'One or more `source_progress` records were marked as '
                'complete. This was unexpected and probably means that the'
                ' calculation workload was not distributed properly.'
            )
            logs.LOG.critical(msg)
            transaction.rollback()
            raise RuntimeError(msg)

        # Mark source_progress records as complete
        src_prog.update(is_complete=True)

        # Update realization progress and mark the realization as
        # complete if it is done.
        haz_general.update_realization(lt_rlz.id, len(src_ids))

    logs.LOG.debug('< transaction complete')
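
`update_result_matrix` merges the matrix computed by this task into the stored one; its body is not shown in these examples. Under the usual assumption that probabilities of exceedance from independent contributions combine as 1 - (1 - a)(1 - b), a plausible sketch would be:

import numpy

def update_result_matrix_sketch(current, new):
    # Hypothetical reconstruction: combine two probability-of-exceedance
    # matrices elementwise, assuming independent contributions.
    return 1.0 - (1.0 - current) * (1.0 - new)

a = numpy.array([[0.1, 0.0], [0.5, 0.2]])
b = numpy.array([[0.3, 0.0], [0.5, 0.1]])
print(update_result_matrix_sketch(a, b))  # e.g. 1-(1-0.1)*(1-0.3) = 0.37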
Example #14
def _update_curves(hc, matrices, lt_rlz, src_ids):
    """
    Helper function for updating source, hazard curve, and realization progress
    records in the database.

    This is intended to be used by :func:`compute_hazard_curves`.

    :param hc:
        :class:`openquake.engine.db.models.HazardCalculation` instance.
    :param matrices:
        A dictionary mapping hazardlib IMT objects to 2D arrays of hazard
        curve probabilities (first dimension: sites, second: IMLs) computed
        by this task.
    :param lt_rlz:
        :class:`openquake.engine.db.models.LtRealization` record for the
        current realization.
    :param src_ids:
        List of source IDs considered for this calculation task.
    """
    with logs.tracing('_update_curves for all IMTs'):
        for imt in hc.intensity_measure_types_and_levels.keys():
            hazardlib_imt = haz_general.imt_to_hazardlib(imt)
            matrix = matrices[hazardlib_imt]
            if (matrix == 0.0).all():
                # The matrix for this IMT is all zeros; there's no reason to
                # update `hazard_curve_progress` records.
                logs.LOG.debug('* No hazard contribution for IMT=%s' % imt)
                continue
            else:
                # There is some contribution to the hazard here; we need to
                # update.
                with transaction.commit_on_success():
                    logs.LOG.debug('> updating hazard for IMT=%s' % imt)
                    query = """
                    SELECT * FROM htemp.hazard_curve_progress
                    WHERE lt_realization_id = %s
                    AND imt = %s
                    FOR UPDATE"""
                    [hc_progress] = models.HazardCurveProgress.objects.raw(
                        query, [lt_rlz.id, imt])

                    hc_progress.result_matrix = update_result_matrix(
                        hc_progress.result_matrix, matrix)
                    hc_progress.save()

                    logs.LOG.debug('< done updating hazard for IMT=%s' % imt)

        with transaction.commit_on_success():
            # Check whether any records in the source progress model with
            # parsed_source_id in src_ids are already marked as complete;
            # roll back and abort if there is at least one.
            src_prog = models.SourceProgress.objects.filter(
                lt_realization=lt_rlz, parsed_source__in=src_ids)

            if any(x.is_complete for x in src_prog):
                msg = (
                    'One or more `source_progress` records were marked as '
                    'complete. This was unexpected and probably means that the'
                    ' calculation workload was not distributed properly.')
                logs.LOG.critical(msg)
                transaction.rollback()
                raise RuntimeError(msg)

            # Mark source_progress records as complete
            src_prog.update(is_complete=True)

            # Update realization progress and mark the realization as
            # complete if it is done.
            haz_general.update_realization(lt_rlz.id, len(src_ids))
Example #15
def compute_disagg(job_id, sites, lt_rlz_id, ltp):
    """
    Calculate disaggregation histograms and save the results to the database.

    Here is the basic calculation workflow:

    1. Get all sources
    2. Get IMTs
    3. Get the hazard curve for each point, IMT, and realization
    4. For each `poes_disagg`, interpolate the IML for each curve.
    5. Get GSIMs, TOM (Temporal Occurrence Model), and truncation level.
    6. Get histogram bin edges.
    7. Prepare calculation args.
    8. Call the hazardlib calculator
       (see :func:`openquake.hazardlib.calc.disagg.disaggregation`
       for more info).

    :param int job_id:
        ID of the currently running :class:`openquake.engine.db.models.OqJob`
    :param list sites:
        `list` of :class:`openquake.hazardlib.site.Site` objects, which
        indicate the locations (and associated soil parameters) for which we
        need to compute disaggregation histograms.
    :param int lt_rlz_id:
        ID of the :class:`openquake.engine.db.models.LtRealization` for which
        we want to compute disaggregation histograms. This realization will
        determine which hazard curve results to use as a basis for the
        calculation.
    :param ltp:
        a :class:`openquake.engine.input.LogicTreeProcessor` instance
    """
    # Silencing 'Too many local variables'
    # pylint: disable=R0914
    logs.LOG.debug(
        '> computing disaggregation for %(np)s sites for realization %(rlz)s' %
        dict(np=len(sites), rlz=lt_rlz_id))

    job = models.OqJob.objects.get(id=job_id)
    hc = job.hazard_calculation
    lt_rlz = models.LtRealization.objects.get(id=lt_rlz_id)
    apply_uncertainties = ltp.parse_source_model_logictree_path(
        lt_rlz.sm_lt_path)
    gsims = ltp.parse_gmpe_logictree_path(lt_rlz.gsim_lt_path)

    src_ids = models.SourceProgress.objects.filter(lt_realization=lt_rlz)\
        .order_by('id').values_list('parsed_source_id', flat=True)
    sources = [
        apply_uncertainties(s.nrml)
        for s in models.ParsedSource.objects.filter(pk__in=src_ids)
    ]

    # Make filters for distance to source and distance to rupture:
    # a better approach would be to filter the sources on distance
    # before, see the comment in the classical calculator
    src_site_filter = openquake.hazardlib.calc.filters.\
        source_site_distance_filter(hc.maximum_distance)
    rup_site_filter = openquake.hazardlib.calc.filters.\
        rupture_site_distance_filter(hc.maximum_distance)

    for imt, imls in hc.intensity_measure_types_and_levels.iteritems():
        hazardlib_imt = haz_general.imt_to_hazardlib(imt)
        hc_im_type, sa_period, sa_damping = models.parse_imt(imt)

        imls = numpy.array(imls[::-1])

        # loop over sites
        for site in sites:
            # get curve for this point/IMT/realization
            [curve] = models.HazardCurveData.objects.filter(
                location=site.location.wkt2d,
                hazard_curve__lt_realization=lt_rlz_id,
                hazard_curve__imt=hc_im_type,
                hazard_curve__sa_period=sa_period,
                hazard_curve__sa_damping=sa_damping,
            )

            # If the hazard curve is all zeros, don't even do the
            # disagg calculation.
            if all([x == 0.0 for x in curve.poes]):
                logs.LOG.debug(
                    '* hazard curve contained all 0 probability values; '
                    'skipping')
                continue

            for poe in hc.poes_disagg:
                iml = numpy.interp(poe, curve.poes[::-1], imls)
                calc_kwargs = {
                    'sources': sources,
                    'site': site,
                    'imt': hazardlib_imt,
                    'iml': iml,
                    'gsims': gsims,
                    'time_span': hc.investigation_time,
                    'truncation_level': hc.truncation_level,
                    'n_epsilons': hc.num_epsilon_bins,
                    'mag_bin_width': hc.mag_bin_width,
                    'dist_bin_width': hc.distance_bin_width,
                    'coord_bin_width': hc.coordinate_bin_width,
                    'source_site_filter': src_site_filter,
                    'rupture_site_filter': rup_site_filter,
                }
                with EnginePerformanceMonitor('computing disaggregation',
                                              job_id, disagg_task):
                    bin_edges, diss_matrix = openquake.hazardlib.calc.\
                        disagg.disaggregation_poissonian(**calc_kwargs)
                    if not bin_edges:  # no ruptures generated
                        continue

                with EnginePerformanceMonitor('saving disaggregation', job_id,
                                              disagg_task):
                    _save_disagg_matrix(job, site, bin_edges, diss_matrix,
                                        lt_rlz, hc.investigation_time,
                                        hc_im_type, iml, poe, sa_period,
                                        sa_damping)

    with transaction.commit_on_success():
        # Update realization progress and mark the realization as
        # complete if it is done.
        haz_general.update_realization(lt_rlz_id, len(sites))

    logs.LOG.debug('< done computing disaggregation')