Example No. 1
    def test_get_site_collection_with_reference_parameters(self):
        cfg = helpers.demo_file(
            'simple_fault_demo_hazard/job.ini')
        job = helpers.get_hazard_job(cfg, username=getpass.getuser())

        site_coll = general.get_site_collection(job.hazard_calculation)

        # all of the sites should have the same reference parameters:
        self.assertTrue((site_coll.vs30 == 760).all())
        self.assertTrue((site_coll.vs30measured).all())
        self.assertTrue((site_coll.z1pt0 == 5).all())
        self.assertTrue((site_coll.z2pt5 == 100).all())

        # as a sanity check, make sure the mesh locations are correct
        job_mesh = job.hazard_calculation.points_to_compute()
        self.assertTrue((job_mesh.lons == site_coll.mesh.lons).all())
        self.assertTrue((job_mesh.lats == site_coll.mesh.lats).all())
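The assertions above lean on numpy's elementwise comparison: comparing an array against a scalar yields a boolean array, and `.all()` passes only if every site matches the reference value. A minimal standalone sketch of the idiom (the array here is a stand-in, not the real `SiteCollection` attribute):

import numpy

# Stand-in for site_coll.vs30: every site shares the reference value.
vs30 = numpy.repeat(760.0, 5)
assert (vs30 == 760).all()        # elementwise comparison, then reduce

# A single deviating site makes the vectorized check fail.
vs30[2] = 800.0
assert not (vs30 == 760).all()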
Example No. 2
    def test_get_site_collection_with_site_model(self):
        cfg = helpers.demo_file(
            'simple_fault_demo_hazard/job_with_site_model.ini')
        job = helpers.get_hazard_job(cfg)
        calc = cls_core.ClassicalHazardCalculator(job)

        # Bootstrap the `site_data` table:
        calc.initialize_sources()
        calc.initialize_site_model()

        site_coll = general.get_site_collection(job.hazard_calculation)
        # Since we're using a pretty big site model, it's a bit excessive to
        # check each and every value.
        # Instead, we'll just test that the length of each site collection attr
        # is equal to the number of points of interest in the calculation.
        expected_len = len(job.hazard_calculation.points_to_compute())

        self.assertEqual(expected_len, len(site_coll))
        self.assertEqual(expected_len, len(site_coll.vs30))
        self.assertEqual(expected_len, len(site_coll.vs30measured))
        self.assertEqual(expected_len, len(site_coll.z1pt0))
        self.assertEqual(expected_len, len(site_coll.z2pt5))
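The repeated length checks could be folded into one loop; a small sketch of the same idea (the helper name `assert_site_params_aligned` is hypothetical, not part of the test suite):

def assert_site_params_aligned(site_coll, expected_len):
    # Each per-site attribute must line up one-to-one with the
    # points of interest in the calculation.
    for attr in ('vs30', 'vs30measured', 'z1pt0', 'z2pt5'):
        assert len(getattr(site_coll, attr)) == expected_len, attr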
Example No. 3
def ses_and_gmfs(job_id, src_ids, lt_rlz_id, task_seed):
    """
    Celery task for the stochastic event set calculator.

    Samples logic trees and calls the stochastic event set calculator.

    Once stochastic event sets are calculated, results will be saved to the
    database. See :class:`openquake.db.models.SESCollection`.

    Optionally (specified in the job configuration using the
    `ground_motion_fields` parameter), GMFs can be computed from each rupture
    in each stochastic event set. GMFs are also saved to the database.

    Once all of this work is complete, a signal will be sent via AMQP to let
    the control node know that the work is complete. (If there is any work left
    to be dispatched, this signal will indicate to the control node that more
    work can be enqueued.)

    :param int job_id:
        ID of the currently running job.
    :param src_ids:
        List of ids of parsed source models from which we will generate
        stochastic event sets/ruptures.
    :param lt_rlz_id:
        ID of the logic tree realization to calculate for.
    :param int task_seed:
        Value for seeding numpy/scipy in the computation of stochastic event
        sets and ground motion fields.
    """
    logs.LOG.debug(
        "> starting `stochastic_event_sets` task: "
        "job_id=%s, lt_realization_id=%s" % (job_id, lt_rlz_id)
    )
    numpy.random.seed(task_seed)

    hc = models.HazardCalculation.objects.get(oqjob=job_id)

    cmplt_lt_ses = None
    if hc.complete_logic_tree_ses:
        cmplt_lt_ses = models.SES.objects.get(ses_collection__output__oq_job=job_id, complete_logic_tree_ses=True)

    cmplt_lt_gmf = None
    if hc.complete_logic_tree_gmf:
        cmplt_lt_gmf = models.GmfSet.objects.get(gmf_collection__output__oq_job=job_id, complete_logic_tree_gmf=True)

    if hc.ground_motion_fields:
        # For ground motion field calculation, we need the points of interest
        # for the calculation.
        points_to_compute = hc.points_to_compute()

    lt_rlz = models.LtRealization.objects.get(id=lt_rlz_id)
    ltp = logictree.LogicTreeProcessor(hc.id)

    apply_uncertainties = ltp.parse_source_model_logictree_path(lt_rlz.sm_lt_path)
    gsims = ltp.parse_gmpe_logictree_path(lt_rlz.gsim_lt_path)

    sources = list(
        haz_general.gen_sources(
            src_ids, apply_uncertainties, hc.rupture_mesh_spacing, hc.width_of_mfd_bin, hc.area_source_discretization
        )
    )

    logs.LOG.debug("> creating site collection")
    site_coll = haz_general.get_site_collection(hc)
    logs.LOG.debug("< done creating site collection")

    if hc.ground_motion_fields:
        imts = [haz_general.imt_to_nhlib(x) for x in hc.intensity_measure_types]

        correl_model = None
        if hc.ground_motion_correlation_model is not None:
            correl_model = _get_correl_model(hc)

    # Compute stochastic event sets
    # For each rupture generated, we can optionally calculate a GMF
    for ses_rlz_n in xrange(1, hc.ses_per_logic_tree_path + 1):
        logs.LOG.debug("> computing stochastic event set %s of %s" % (ses_rlz_n, hc.ses_per_logic_tree_path))

        # This is the container for all ruptures for this stochastic event set
        # (specified by `ordinal` and the logic tree realization).
        # NOTE: Many tasks can contribute ruptures to this SES.
        ses = models.SES.objects.get(ses_collection__lt_realization=lt_rlz, ordinal=ses_rlz_n)

        sources_sites = ((src, site_coll) for src in sources)
        ssd_filter = filters.source_site_distance_filter(hc.maximum_distance)
        # Get the filtered sources, ignore the site collection:
        filtered_sources = (src for src, _ in ssd_filter(sources_sites))
        # Calculate stochastic event sets:
        logs.LOG.debug("> computing stochastic event sets")
        if hc.ground_motion_fields:
            logs.LOG.debug("> computing also ground motion fields")
            # This will be the "container" for all computed ground motion field
            # results for this stochastic event set.
            gmf_set = models.GmfSet.objects.get(gmf_collection__lt_realization=lt_rlz, ses_ordinal=ses_rlz_n)

        ses_poissonian = stochastic.stochastic_event_set_poissonian(filtered_sources, hc.investigation_time)

        logs.LOG.debug("> looping over ruptures")
        rupture_ctr = 0
        for rupture in ses_poissonian:
            # Prepare and save SES ruptures to the db:
            logs.LOG.debug("> saving SES rupture to DB")
            _save_ses_rupture(ses, rupture, cmplt_lt_ses)
            logs.LOG.debug("> done saving SES rupture to DB")

            # Compute ground motion fields (if requested)
            logs.LOG.debug("compute ground motion fields?  %s" % hc.ground_motion_fields)
            if hc.ground_motion_fields:
                # Compute and save ground motion fields

                gmf_calc_kwargs = {
                    "rupture": rupture,
                    "sites": site_coll,
                    "imts": imts,
                    "gsim": gsims[rupture.tectonic_region_type],
                    "truncation_level": hc.truncation_level,
                    "realizations": DEFAULT_GMF_REALIZATIONS,
                    "correlation_model": correl_model,
                    "rupture_site_filter": filters.rupture_site_distance_filter(hc.maximum_distance),
                }
                logs.LOG.debug("> computing ground motion fields")
                gmf_dict = gmf_calc.ground_motion_fields(**gmf_calc_kwargs)
                logs.LOG.debug("< done computing ground motion fields")

                logs.LOG.debug("> saving GMF results to DB")
                _save_gmf_nodes(gmf_set, gmf_dict, points_to_compute, cmplt_lt_gmf)
                logs.LOG.debug("< done saving GMF results to DB")
            rupture_ctr += 1

        logs.LOG.debug("< Done looping over ruptures")
        logs.LOG.debug(
            "%s ruptures computed for SES realization %s of %s" % (rupture_ctr, ses_rlz_n, hc.ses_per_logic_tree_path)
        )
        logs.LOG.debug("< done computing stochastic event set %s of %s" % (ses_rlz_n, hc.ses_per_logic_tree_path))

    logs.LOG.debug("< task complete, signalling completion")
    haz_general.signal_task_complete(job_id, len(src_ids))
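A note on the filtering idiom in the task above: `source_site_distance_filter` consumes (source, sites) pairs and yields (source, filtered_sites) pairs, dropping any source with no site inside `maximum_distance`. A minimal sketch of the pattern in isolation, assuming the same pair-based protocol (the function name is illustrative):

def surviving_sources(sources, site_coll, ssd_filter):
    # Pair every source with the full site collection ...
    sources_sites = ((src, site_coll) for src in sources)
    # ... then keep only the sources that pass the distance filter,
    # discarding the per-source filtered site collections.
    return (src for src, _ in ssd_filter(sources_sites))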
Example No. 4
def compute_hazard_curves(job_id, src_ids, lt_rlz_id):
    """
    Celery task for hazard curve calculator.

    Samples logic trees, gathers site parameters, and calls the hazard curve
    calculator.

    Once hazard curve data is computed, the result progress is updated (within
    a transaction, to prevent race conditions) in the
    `htemp.hazard_curve_progress` table.

    Once all of this work is complete, a signal will be sent via AMQP to let
    the control node know that the work is complete. (If there is any work left
    to be dispatched, this signal will indicate to the control node that more
    work can be enqueued.)

    :param int job_id:
        ID of the currently running job.
    :param src_ids:
        List of ids of parsed source models to take into account.
    :param lt_rlz_id:
        ID of the logic tree realization to calculate for.
    """
    hc = models.HazardCalculation.objects.get(oqjob=job_id)

    lt_rlz = models.LtRealization.objects.get(id=lt_rlz_id)
    ltp = logictree.LogicTreeProcessor(hc.id)

    apply_uncertainties = ltp.parse_source_model_logictree_path(
            lt_rlz.sm_lt_path)
    gsims = ltp.parse_gmpe_logictree_path(lt_rlz.gsim_lt_path)

    sources = haz_general.gen_sources(
        src_ids, apply_uncertainties, hc.rupture_mesh_spacing,
        hc.width_of_mfd_bin, hc.area_source_discretization)

    imts = haz_general.im_dict_to_nhlib(hc.intensity_measure_types_and_levels)

    # Now initialize the site collection for use in the calculation.
    # If there is no site model defined, we will use the same reference
    # parameters (defined in the HazardCalculation) for every site.

    # TODO: We could just create the SiteCollection once, pickle it, and store
    # it in the DB (in SiteData). Creating the SiteCollection isn't an
    # expensive operation (at least for small calculations), but this is
    # wasted work.
    logs.LOG.debug('> creating site collection')
    site_coll = haz_general.get_site_collection(hc)
    logs.LOG.debug('< done creating site collection')

    # Prepare args for the calculator.
    calc_kwargs = {'gsims': gsims,
                   'truncation_level': hc.truncation_level,
                   'time_span': hc.investigation_time,
                   'sources': sources,
                   'imts': imts,
                   'sites': site_coll}

    if hc.maximum_distance:
        dist = hc.maximum_distance
        calc_kwargs['source_site_filter'] = (
                nhlib.calc.filters.source_site_distance_filter(dist))
        calc_kwargs['rupture_site_filter'] = (
                nhlib.calc.filters.rupture_site_distance_filter(dist))

    # mapping "imt" to 2d array of hazard curves: first dimension -- sites,
    # second -- IMLs
    logs.LOG.debug('> computing hazard matrices')
    matrices = nhlib.calc.hazard_curve.hazard_curves_poissonian(**calc_kwargs)
    logs.LOG.debug('< done computing hazard matrices')

    logs.LOG.debug('> starting transaction')
    with transaction.commit_on_success():
        logs.LOG.debug('looping over IMTs')

        for imt in hc.intensity_measure_types_and_levels.keys():
            logs.LOG.debug('> updating hazard for IMT=%s' % imt)
            nhlib_imt = haz_general.imt_to_nhlib(imt)
            query = """
            SELECT * FROM htemp.hazard_curve_progress
            WHERE lt_realization_id = %s
            AND imt = %s
            FOR UPDATE"""
            [hc_progress] = models.HazardCurveProgress.objects.raw(
                query, [lt_rlz.id, imt])

            hc_progress.result_matrix = update_result_matrix(
                hc_progress.result_matrix, matrices[nhlib_imt])
            hc_progress.save()

            logs.LOG.debug('< done updating hazard for IMT=%s' % imt)

        # Before the transaction completes:

        # Check here whether any records in the source progress model
        # (with parsed_source_id from src_ids) are marked as complete;
        # if there is at least one, roll back and abort.
        src_prog = models.SourceProgress.objects.filter(
            lt_realization=lt_rlz, parsed_source__in=src_ids)

        if any(x.is_complete for x in src_prog):
            msg = (
                'One or more `source_progress` records were marked as '
                'complete. This was unexpected and probably means that '
                'the calculation workload was not distributed properly.'
            )
            logs.LOG.critical(msg)
            transaction.rollback()
            raise RuntimeError(msg)

        # Mark source_progress records as complete
        src_prog.update(is_complete=True)

        # Update realization progress and
        # mark the realization as complete if it is done.
        # First, refresh the logic tree realization record:
        ltr_query = """
        SELECT * FROM hzrdr.lt_realization
        WHERE id = %s
        FOR UPDATE
        """

        [lt_rlz] = models.LtRealization.objects.raw(
            ltr_query, [lt_rlz.id])

        lt_rlz.completed_sources += len(src_ids)
        if lt_rlz.completed_sources == lt_rlz.total_sources:
            lt_rlz.is_complete = True

        lt_rlz.save()

    logs.LOG.debug('< transaction complete')
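`update_result_matrix` is not shown in this snippet. A plausible sketch, under the assumption that curves from disjoint source chunks are combined as independent probabilities of exceedance:

import numpy

def update_result_matrix(current, new):
    # Assumption: each chunk contributes an independent probability of
    # exceedance, so combine with P(A or B) = 1 - (1 - P(A)) * (1 - P(B)).
    current = numpy.asarray(current)
    new = numpy.asarray(new)
    return 1 - (1 - current) * (1 - new)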