Example 1
def log_percent_complete(job_id, ctype):
    """Log a message when the percentage completed changed for a calculation.

    :param int job_id: identifier of the job in question
    :param str ctype: calculation type, one of: hazard, risk
    """
    if ctype not in ("hazard", "risk"):
        LOG.warn("Unknown calculation type: '%s'" % ctype)
        return -1

    key = "nhzrd_total" if ctype == "hazard" else "nrisk_total"
    total = stats.pk_get(job_id, key)
    key = "nhzrd_done" if ctype == "hazard" else "nrisk_done"
    done = stats.pk_get(job_id, key)

    if done <= 0 or total <= 0:
        return 0

    percent = total / 100.0
    # Store percentage complete as well as the last value reported as integers
    # in order to avoid reporting the same percentage more than once.
    percent_complete = int(done / percent)
    # Get the last value reported
    lvr = stats.pk_get(job_id, "lvr")

    # Only report the percentage completed if it is above the last value shown
    if percent_complete > lvr:
        log_progress("%s %3d%% complete" % (ctype, percent_complete), 2)
        stats.pk_set(job_id, "lvr", percent_complete)

    return percent_complete
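All of these examples revolve around `openquake.utils.stats.pk_get`, which reads a job's progress counter from a key-value store (Redis) via a short predefined key. Below is a minimal, dict-backed sketch of that counter API, inferred from the calls in the examples on this page; the key template, the `STATS_KEYS` contents, and the `pk_set` behaviour are assumptions, not the real module.

# Minimal, self-contained sketch of the counter API exercised in these
# examples. NOT the real openquake.utils.stats: the key template and the
# STATS_KEYS contents below are assumptions inferred from the examples.

_kvs = {}  # stand-in for the Redis KVS used by the real module

_KEY_TEMPLATE = "oqs:%s:%s:%s"  # assumed layout: job_id, counter name, type

# Assumed mapping: predefined key -> (counter name, counter type).
STATS_KEYS = {
    "nhzrd_total": ("nhzrd_total", "t"),
    "nhzrd_done": ("nhzrd_done", "i"),
    "nhzrd_failed": ("nhzrd_failed", "i"),
    "nrisk_total": ("nrisk_total", "t"),
    "nrisk_done": ("nrisk_done", "i"),
    "nrisk_failed": ("nrisk_failed", "i"),
    "lvr": ("lvr", "i"),
}


def key_name(job_id, name, counter_type):
    """Build the full KVS key for a job's counter."""
    return _KEY_TEMPLATE % (job_id, name, counter_type)


def pk_set(job_id, pkey, value):
    """Store `value` under the predefined key `pkey` for the given job."""
    _kvs[key_name(job_id, *STATS_KEYS[pkey])] = value


def pk_get(job_id, pkey):
    """Fetch the value stored under `pkey` for the given job, or None."""
    value = _kvs.get(key_name(job_id, *STATS_KEYS[pkey]))
    return int(value) if value is not None else None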
Example 2
    def serialize_hazard_map_at_poe(self, sites, poe, key_template,
                                    hm_attrib_update, nrml_file):
        """
        Serialize the hazard map for a set of sites at a given PoE.

        Depending on the parameters the serialized map will be a mean or
        quantile hazard map.

        :param sites: the sites for which the map will be serialized
        :type sites: list of :py:class:`openquake.shapes.Site`
        :param poe: the PoE at which the map will be serialized
        :type poe: :py:class:`float`
        :param key_template: a template for constructing the key used to get,
                             for each site, its map from the KVS
        :type key_template: :py:class:`string`
        :param hm_attrib_update: a dictionary containing metadata for the set
                                 of maps that will be serialized
        :type hm_attrib_update: :py:class:`dict`
        :param nrml_file: the output filename
        :type nrml_file: :py:class:`string`
        """
        nrml_path = self.job_ctxt.build_nrml_path(nrml_file)

        LOG.info("Generating NRML hazard map file for PoE %s, "
                 "%s nodes in hazard map: %s" % (poe, len(sites), nrml_file))

        map_writer = hazard_output.create_hazardmap_writer(
            self.job_ctxt.job_id, self.job_ctxt.serialize_results_to,
            nrml_path)
        hm_data = []

        for site in sites:
            key = key_template % hash(site)
            # use hazard map IML values from KVS
            hm_attrib = {
                'investigationTimeSpan': self.job_ctxt['INVESTIGATION_TIME'],
                'IMT': self.job_ctxt['INTENSITY_MEASURE_TYPE'],
                'vs30': self.job_ctxt['REFERENCE_VS30_VALUE'],
                'IML': kvs.get_value_json_decoded(key),
                'poE': poe
            }

            hm_attrib.update(hm_attrib_update)
            hm_data.append((site, hm_attrib))

        LOG.debug(">> path: %s" % nrml_path)
        # XML serialization context
        xsc = namedtuple("XSC", "blocks, cblock, i_total, i_done, i_next")(
            stats.pk_get(self.job_ctxt.job_id, "blocks"),
            stats.pk_get(self.job_ctxt.job_id, "cblock"), len(sites), 0,
            len(hm_data))
        hazard_output.SerializerContext().update(xsc)
        map_writer.serialize(hm_data)

        return nrml_path
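A side note on the bookkeeping idiom used here and in the serialize_hazard_curve example at the end of this page: the XML serialization context is an immutable namedtuple, so progress fields are advanced with `_replace`, which returns an updated copy. A standalone illustration (only the XSC field names come from the examples; the surrounding SerializerContext machinery is not reproduced):

from collections import namedtuple

XSC = namedtuple("XSC", "blocks, cblock, i_total, i_done, i_next")

xsc = XSC(blocks=4, cblock=1, i_total=100, i_done=0, i_next=25)
# namedtuples are immutable: _replace() returns an updated copy, which is
# how serialize_hazard_curve advances i_done batch by batch.
xsc = xsc._replace(i_done=xsc.i_done + xsc.i_next)
print(xsc.i_done)  # 25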
Example 3
    def test_pk_get_with_existing_incremental(self):
        """The correct value is obtained for an existing predefined key."""
        job_id = 92
        pkey = "cblock"
        key = stats.key_name(job_id, *stats.STATS_KEYS[pkey])

        stats.delete_job_counters(job_id)
        kvs = self.connect()
        kvs.set(key, 929)
        stats.pk_get(job_id, pkey)
        self.assertEqual("929", kvs.get(key))
Example 4
    def test_compute_uhs_task_pi_failure_counter(self):
        # Like the `compute_uhs_task` progress-indicator test below, but
        # here we make sure task failure counters are properly incremented
        # if a task fails.

        cmpt_uhs = '%s.%s' % (self.UHS_CORE_MODULE, 'compute_uhs')
        with helpers.patch(cmpt_uhs) as compute_mock:

            # We want to force a failure to occur in the task:
            compute_mock.side_effect = RuntimeError('Mock exception')

            get_counter = lambda: stats.pk_get(self.job_id, "nhzrd_failed")

            # The counter should start out empty:
            self.assertEqual(0, get_counter())

            self.assertRaises(RuntimeError, compute_uhs_task,
                              self.job_id, 0, site=Site(0.0, 0.0))
            self.assertEqual(1, get_counter())

            # Create two more failures:
            self.assertRaises(RuntimeError, compute_uhs_task,
                              self.job_id, 0, site=Site(0.0, 0.0))
            self.assertRaises(RuntimeError, compute_uhs_task,
                              self.job_id, 0, site=Site(0.0, 0.0))
            self.assertEqual(3, get_counter())
Example 5
    def test_compute_uhs_task_pi(self):
        # Test that progress indicators are working properly for
        # `compute_uhs_task`.

        # Mock out the two 'heavy' functions called by this task;
        # we don't need to do these and we don't want to waste the cycles.
        cmpt_uhs = '%s.%s' % (self.UHS_CORE_MODULE, 'compute_uhs')
        write_uhs_data = '%s.%s' % (self.UHS_CORE_MODULE,
                                    'write_uhs_spectrum_data')
        with helpers.patch(cmpt_uhs):
            with helpers.patch(write_uhs_data):

                get_counter = lambda: stats.pk_get(self.job_id, "nhzrd_done")

                # First, check that the completion counter is zero
                self.assertEqual(0, get_counter())

                realization = 0
                site = Site(0.0, 0.0)
                # execute the task as a plain old function
                compute_uhs_task(self.job_id, realization, site=site)
                self.assertEqual(1, get_counter())

                compute_uhs_task(self.job_id, realization, site=site)
                self.assertEqual(2, get_counter())
Example 6
def completed_task_count(job_id):
    """Given the ID of a currently running calculation, query the stats
    counters in Redis to get the number of completed
    :func:`compute_uhs_task` task executions.

    Successful and failed executions are included in the count.

    :param int job_id:
        ID of the current job.
    :returns:
        Number of completed :func:`compute_uhs_task` task executions so
        far.
    """
    success_count = stats.pk_get(job_id, "nhzrd_done")
    fail_count = stats.pk_get(job_id, "nhzrd_failed")

    return (success_count or 0) + (fail_count or 0)
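A quick usage sketch, assuming the dict-backed pk_get/pk_set sketch after Example 1 and hypothetical counter values:

# Hypothetical job 93 with seven successful and three failed task runs.
pk_set(93, "nhzrd_done", 7)
pk_set(93, "nhzrd_failed", 3)
print(completed_task_count(93))  # -> 10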
Example 7
    def test_with_job_id_and_data_in_args(self):
        # The job_id is passed via args
        result = _RESULTS.next()
        job_id = _JOB_IDS.next()
        ctype = "h"

        @stats.count_progress(ctype)
        def no_exception(job_id, items):
            return result

        previous_value = stats.pk_get(job_id, _COUNTER[ctype])

        # Call the wrapped function.
        self.assertEqual(result, no_exception(job_id, range(result)))

        value = stats.pk_get(job_id, _COUNTER[ctype])
        self.assertEqual(result, (value - previous_value))
Example 8
    def test_success_stats(self):
        """
        The success counter is incremented when the wrapped function
        terminates without raising an exception.
        """
        area = "h"

        @stats.count_progress(area)
        def no_exception(job_id, items):
            return 999

        previous_value = stats.pk_get(11, "nhzrd_done")

        # Call the wrapped function.
        self.assertEqual(999, no_exception(11, range(5)))

        value = stats.pk_get(11, "nhzrd_done")
        self.assertEqual(5, (value - previous_value))
Example 9
    def test_failure_stats(self):
        """
        The failure counter is incremented when the wrapped function
        terminates by raising an exception.
        """
        area = "r"

        @stats.count_progress(area)
        def raise_exception(job_id, items):
            raise NotImplementedError

        previous_value = stats.pk_get(22, "nrisk_failed")

        # Call the wrapped function.
        self.assertRaises(NotImplementedError, raise_exception, 22, range(6))

        value = stats.pk_get(22, "nrisk_failed")
        self.assertEqual(6, (value - previous_value))
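The two tests above (and the `data_arg` tests elsewhere on this page) exercise a `count_progress` decorator that adds the number of data items to the area's done or failed counter, depending on whether the wrapped function raises. Below is a hedged sketch of such a decorator built on the dict-backed counter sketch after Example 1; the real openquake.utils.stats implementation may differ in detail.

import functools

# Assumed counter names per calculation area ("h"azard / "r"isk), mirroring
# the _COUNTER lookups in the tests on this page.
_DONE = {"h": "nhzrd_done", "r": "nrisk_done"}
_FAILED = {"h": "nhzrd_failed", "r": "nrisk_failed"}


def count_progress(area, data_arg=None):
    """Sketch of a progress-counting decorator.

    The wrapped function is assumed to take (job_id, items, ...); when
    `data_arg` names a keyword argument, that argument supplies the data
    whose length is counted instead of the second positional parameter.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(job_id, items, *args, **kwargs):
            data = kwargs.get(data_arg, items) if data_arg else items
            try:
                result = func(job_id, items, *args, **kwargs)
            except Exception:
                _incr(job_id, _FAILED[area], len(data))
                raise
            _incr(job_id, _DONE[area], len(data))
            return result
        return wrapper
    return decorator


def _incr(job_id, pkey, amount):
    # Hypothetical helper over the pk_get/pk_set sketch above.
    pk_set(job_id, pkey, (pk_get(job_id, pkey) or 0) + amount)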
Example 10
    def test_initialize_pr_data(self):
        # The total/done counters for progress reporting are initialized
        # correctly.
        self.calc.initialize_sources()
        self.calc.initialize_realizations(
            rlz_callbacks=[self.calc.initialize_hazard_curve_progress])
        ltr1, ltr2 = models.LtRealization.objects.filter(
            hazard_calculation=self.job.hazard_calculation.id).order_by("id")

        ltr1.completed_sources = 11
        ltr1.save()

        self.calc.initialize_pr_data()

        total = stats.pk_get(self.calc.job.id, "nhzrd_total")
        self.assertEqual(ltr1.total_sources + ltr2.total_sources, total)
        done = stats.pk_get(self.calc.job.id, "nhzrd_done")
        self.assertEqual(ltr1.completed_sources + ltr2.completed_sources, done)
Example 11
    def test_with_data_arg_in_kwargs_overriding_2nd_positional_param(self):
        # A data parameter passed via kwargs will override the second
        # positional parameter.
        result = _RESULTS.next()
        job_id = _JOB_IDS.next()
        ctype = "h"

        @stats.count_progress(ctype, data_arg="the_data_arg")
        def no_exception(job_id, items, the_data_arg):
            return result

        previous_value = stats.pk_get(job_id, _COUNTER[ctype])

        # Call the wrapped function.
        self.assertEqual(result, no_exception(job_id, range(result - 1),
                         the_data_arg=range(result)))

        value = stats.pk_get(job_id, _COUNTER[ctype])
        self.assertEqual(result, (value - previous_value))
Example 12
    def test_pk_get_with_existing_debug_and_debug_stats_off(self):
        """`None` is returned when debug stats are off."""
        job_id = 95
        pkey = "hcls_xmlcurvewrites"
        stats.delete_job_counters(job_id)
        with helpers.patch("openquake.utils.stats.debug_stats_enabled") as dse:
            dse.return_value = False
            key = stats._KEY_TEMPLATE % ((job_id,) + stats.STATS_KEYS[pkey])
            kvs = self.connect()
            kvs.set(key, 959)
            self.assertIs(None, stats.pk_get(job_id, pkey))
Example 13
    def test_pk_get_with_existing_debug_and_debug_stats_enabled(self):
        """The value is obtained correctly for an existing debug counter."""
        job_id = 94
        pkey = "hcls_xmlcurvewrites"
        stats.delete_job_counters(job_id)
        with helpers.patch("openquake.utils.stats.debug_stats_enabled") as dse:
            dse.return_value = True
            key = stats.key_name(job_id, *stats.STATS_KEYS[pkey])
            kvs = self.connect()
            kvs.set(key, 949)
            self.assertEqual(949, stats.pk_get(job_id, pkey))
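Taken together, the last two tests imply that `pk_get` consults a `debug_stats_enabled()` switch for debug counters (the `hcls_*` keys here) and short-circuits to `None` when it is off. A hedged sketch of that gating, extending the pk_get sketch after Example 1; the prefix convention is an assumption inferred from the test key names.

def debug_stats_enabled():
    # Hypothetical switch; the real value would come from configuration.
    return False

# Assumed debug-counter entry, so the sketch runs end to end.
STATS_KEYS["hcls_xmlcurvewrites"] = ("hcls_xmlcurvewrites", "d")


def pk_get(job_id, pkey):
    """Fetch a predefined counter; debug counters (assumed to be the
    "hcls_"-prefixed keys) read as None unless debug stats are enabled."""
    if pkey.startswith("hcls_") and not debug_stats_enabled():
        return None
    value = _kvs.get(key_name(job_id, *STATS_KEYS[pkey]))
    return int(value) if value is not None else None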
Example 14
    def test_initialize_pr_data_with_ses(self):
        hc = self.job.hazard_calculation

        # Initialize sources as a setup for the test:
        self.calc.initialize_sources()

        self.calc.initialize_realizations(
            rlz_callbacks=[self.calc.initialize_ses_db_records])

        ltr1, ltr2 = models.LtRealization.objects.filter(
            hazard_calculation=hc).order_by("id")

        ltr1.completed_sources = 12
        ltr1.save()

        self.calc.initialize_pr_data()

        total = stats.pk_get(self.calc.job.id, "nhzrd_total")
        self.assertEqual(ltr1.total_sources + ltr2.total_sources, total)
        done = stats.pk_get(self.calc.job.id, "nhzrd_done")
        self.assertEqual(ltr1.completed_sources + ltr2.completed_sources, done)
Example 15
    def serialize_hazard_curve(self, nrml_file, key_template, hc_attrib_update,
                               sites):
        """
        Serialize the hazard curves of a set of sites.

        Depending on the parameters the serialized curve will be a plain, mean
        or quantile hazard curve.

        :param nrml_file: the output filename
        :type nrml_file: :py:class:`string`
        :param key_template: a template for constructing the key to get, for
                             each site, its curve from the KVS
        :type key_template: :py:class:`string`
        :param hc_attrib_update: a dictionary containing metadata for the set
                                 of curves that will be serialized
        :type hc_attrib_update: :py:class:`dict`
        :param sites: the sites for which the curves will be serialized
        :type sites: list of :py:class:`openquake.shapes.Site`
        """

        def pause_generator(value):
            """
            Yields the initial value on the first call, then doubles it
            on each subsequent call.

            N.B.: the maximum value returned will never exceed 90 (seconds).
            """
            yield value
            while True:
                if value < 45:
                    value *= 2
                yield value

        # XML serialization context
        xsc = namedtuple("XSC", "blocks, cblock, i_total, i_done, i_next")(
                         stats.pk_get(self.job_ctxt.job_id, "blocks"),
                         stats.pk_get(self.job_ctxt.job_id, "cblock"),
                         len(sites), 0, 0)

        nrml_path = self.job_ctxt.build_nrml_path(nrml_file)

        curve_writer = hazard_output.create_hazardcurve_writer(
            self.job_ctxt.job_id, self.job_ctxt.serialize_results_to,
            nrml_path)

        sites = set(sites)
        accounted_for = set()
        min_pause = 0.1
        pgen = pause_generator(min_pause)
        pause = pgen.next()

        while accounted_for != sites:
            failures = stats.failure_counters(self.job_ctxt.job_id, "h")
            if failures:
                raise RuntimeError("hazard failures (%s), aborting" % failures)
            hc_data = []
            # Sleep a little before checking the availability of additional
            # hazard curve results.
            time.sleep(pause)
            results_found = 0
            for site in sites:
                if site in accounted_for:
                    continue
                value = kvs.get_value_json_decoded(key_template % hash(site))
                if value is None:
                    # No value yet, proceed to next site.
                    continue
                # Use hazard curve ordinate values (PoE) from KVS and abscissae
                # from the IML list in config.
                hc_attrib = {
                    'investigationTimeSpan':
                        self.job_ctxt['INVESTIGATION_TIME'],
                    'IMLValues': self.job_ctxt.imls,
                    'IMT': self.job_ctxt['INTENSITY_MEASURE_TYPE'],
                    'PoEValues': value}
                hc_attrib.update(hc_attrib_update)
                hc_data.append((site, hc_attrib))
                accounted_for.add(site)
                results_found += 1
            if not results_found:
                # No results found, increase the sleep pause.
                pause = pgen.next()
            else:
                hazard_output.SerializerContext().update(
                    xsc._replace(i_next=len(hc_data)))
                curve_writer.serialize(hc_data)
                xsc = xsc._replace(i_done=xsc.i_done + len(hc_data))
                pause *= 0.8
                pause = min_pause if pause < min_pause else pause

        return nrml_path
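The polling loop above embeds a small exponential-backoff idiom: the pause doubles while no new results arrive (doubling stops once the value reaches 45, so nothing above 90 seconds is yielded) and decays by a factor of 0.8, floored at `min_pause`, when results do arrive. The generator is easy to verify in isolation:

def pause_generator(value):
    """Yield `value`, then double it on each subsequent call; doubling
    stops once the value reaches 45, so nothing above 90 is yielded."""
    yield value
    while True:
        if value < 45:
            value *= 2
        yield value

pgen = pause_generator(0.1)
print([next(pgen) for _ in range(12)])
# [0.1, 0.2, 0.4, 0.8, 1.6, 3.2, 6.4, 12.8, 25.6, 51.2, 51.2, 51.2]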