def log_percent_complete(job_id, ctype):
    """Log a message when the percentage completed changed for a calculation.

    :param int job_id: identifier of the job in question
    :param str ctype: calculation type, one of: hazard, risk
    :returns: the percentage completed (an int), 0 when nothing is done yet,
        or -1 for an unknown calculation type
    """
    if ctype not in ("hazard", "risk"):
        LOG.warn("Unknown calculation type: '%s'" % ctype)
        return -1

    # Counter key prefix depends on the calculation type.
    prefix = "nhzrd" if ctype == "hazard" else "nrisk"
    total = stats.pk_get(job_id, "%s_total" % prefix)
    done = stats.pk_get(job_id, "%s_done" % prefix)
    if done <= 0 or total <= 0:
        return 0

    # Integer percentage, so the same value is never reported twice.
    percent_complete = int(done / (total / 100.0))

    # Only report when we moved past the last value reported ("lvr").
    last_reported = stats.pk_get(job_id, "lvr")
    if percent_complete <= last_reported:
        return percent_complete

    LOG.progress("%s %3d%% complete" % (ctype, percent_complete), indent=True)
    stats.pk_set(job_id, "lvr", percent_complete)
    return percent_complete
def log_percent_complete(job_id, ctype):
    """Log a message when the percentage completed changed for a calculation.

    :param int job_id: identifier of the job in question
    :param str ctype: calculation type, one of: hazard, risk
    :returns: the percentage completed (an int), 0 when nothing is done yet,
        or -1 for an unknown calculation type
    """
    # Map each calculation type to its total/done counter names.
    counter_keys = {
        "hazard": ("nhzrd_total", "nhzrd_done"),
        "risk": ("nrisk_total", "nrisk_done"),
    }
    if ctype not in counter_keys:
        LOG.warn("Unknown calculation type: '%s'" % ctype)
        return -1

    total_key, done_key = counter_keys[ctype]
    total = stats.pk_get(job_id, total_key)
    done = stats.pk_get(job_id, done_key)
    if done <= 0 or total <= 0:
        return 0

    # Work with integer percentages so the same value is reported only once.
    percent_complete = int(done / (total / 100.0))

    # "lvr" holds the last value reported; only log when we advanced past it.
    if percent_complete > stats.pk_get(job_id, "lvr"):
        LOG.progress(
            "%s %3d%% complete" % (ctype, percent_complete), indent=True)
        stats.pk_set(job_id, "lvr", percent_complete)
    return percent_complete
def test_pk_set_with_existing_incremental(self):
    """The value is set correctly for an existing predefined key."""
    job_id = 72
    pkey = "cblock"
    # Resolve the full kvs key for the predefined stats key.
    full_key = stats.key_name(job_id, *stats.STATS_KEYS[pkey])
    stats.delete_job_counters(job_id)
    connection = self.connect()
    stats.pk_set(job_id, pkey, 727)
    # The kvs stores the counter as a string.
    self.assertEqual("727", connection.get(full_key))
def test_get_progress_timing_data_with_stale_increment_ts(self):
    # The progress counter increment time stamp exists but is not used
    # since the time stamp in the *executing* `JobPhaseStats` record is
    # more recent.
    now = datetime.utcnow()
    # Stale increment time stamp: 9 minutes ago, stored as epoch seconds.
    stats.pk_set(self.job.id, "lvr_ts",
                 (now - timedelta(minutes=9)).strftime("%s"))
    # More recent "executing" phase record: 8 minutes ago.
    jps = JobPhaseStats(oq_job=self.job, ctype="hazard",
                        job_status="executing")
    jps.start_time = now - timedelta(minutes=8)
    jps.save()
    actual, timeout = stats.get_progress_timing_data(self.job)
    # 8 minutes == 480 seconds, give or take a few seconds of test runtime.
    self.assertTrue(approx_equal(480, actual, 5))
def _initialize_progress(self, total):
    """Record the total/completed number of work items.

    This is needed for the purpose of providing an indication of progress
    to the end user."""
    self.progress.update(total=total)
    # Reset the progress counters: last value reported, total and done.
    for key, value in (("lvr", 0), ("nrisk_total", total), ("nrisk_done", 0)):
        stats.pk_set(self.job.id, key, value)
def test_log_percent_complete_with_zero_percent_done(self):
    # nothing is reported since the percentage complete value is zero
    job_id = 13
    for key, value in (("nhzrd_total", 100), ("nhzrd_done", 0), ("lvr", -1)):
        stats.pk_set(job_id, key, value)
    with mock.patch("openquake.engine.logs.LOG.progress") as progress_mock:
        result = logs.log_percent_complete(job_id, "hazard")
        self.assertEqual(0, result)
        self.assertEqual(0, progress_mock.call_count)
def initialize_pr_data(self):
    """Record the total/completed number of work items.

    This is needed for the purpose of providing an indication of progress
    to the end user.
    """
    stats.pk_set(self.job.id, "lvr", 0)
    rs = models.LtRealization.objects.filter(
        hazard_calculation=self.job.hazard_calculation)
    # aggregate() returns a dict keyed "<field>__sum"; the sum is None
    # when the queryset is empty.
    total = rs.aggregate(Sum("total_items"))["total_items__sum"]
    done = rs.aggregate(Sum("completed_items"))["completed_items__sum"]
    stats.pk_set(self.job.id, "nhzrd_total", total)
    # BUG FIX: the old code compared the aggregate *dict* to zero
    # ("if done > 0:") instead of the summed value, so the comparison
    # never looked at the actual count. Compare the extracted sum and
    # guard against None (no realizations).
    if done is not None and done > 0:
        stats.pk_set(self.job.id, "nhzrd_done", done)
def test_log_percent_complete_with_almost_same_percentage_value(self):
    # only 1 value is reported when the percentage complete value is
    # almost the same (12.6 versus 12).
    job_id = 12
    # 46/366 is ~12.57%, which truncates to 12 -- the value already shown.
    for key, value in (("nhzrd_total", 366), ("nhzrd_done", 46), ("lvr", 12)):
        stats.pk_set(job_id, key, value)
    with mock.patch("openquake.engine.logs.LOG.progress") as progress_mock:
        self.assertEqual(12, logs.log_percent_complete(job_id, "hazard"))
        self.assertEqual(0, progress_mock.call_count)
def initialize_pr_data(self):
    """Record the total/completed number of work items.

    This is needed for the purpose of providing an indication of progress
    to the end user.
    """
    stats.pk_set(self.job.id, "lvr", 0)
    rs = models.LtRealization.objects.filter(
        hazard_calculation=self.job.hazard_calculation)
    # aggregate() yields {"<field>__sum": value}; value is None when the
    # queryset is empty.
    total = rs.aggregate(Sum("total_items"))["total_items__sum"]
    done = rs.aggregate(Sum("completed_items"))["completed_items__sum"]
    stats.pk_set(self.job.id, "nhzrd_total", total)
    # BUG FIX: previously "if done > 0:" compared the aggregate dict
    # itself -- not the summed value -- against zero. Extract the sum
    # first and handle the empty-queryset None case.
    if done is not None and done > 0:
        stats.pk_set(self.job.id, "nhzrd_done", done)
def test_log_percent_complete_with_new_percentage_value(self):
    # the percentage complete is reported since it exceeds the last value
    # reported
    job_id = 14
    stats.pk_set(job_id, "nhzrd_total", 100)
    stats.pk_set(job_id, "nhzrd_done", 20)
    stats.pk_set(job_id, "lvr", 12)
    with mock.patch("openquake.engine.logs.LOG.progress") as lpm:
        rv = logs.log_percent_complete(job_id, "hazard")
        self.assertEqual(20, rv)
        self.assertEqual(1, lpm.call_count)
        # BUG FIX: the message is formatted with "%s %3d%% complete", so
        # the number is right-aligned in a 3-character field -- i.e. there
        # are TWO spaces before "20". The old expected string had only
        # one space and could never match the actual message.
        self.assertEqual("hazard  20% complete",
                         lpm.call_args_list[0][0][0])
def _initialize_progress(self, total):
    """Record the total/completed number of work items.

    This is needed for the purpose of providing an indication of progress
    to the end user."""
    logs.LOG.debug("Computing risk over %d assets" % total)
    self.progress.update(total=total)
    # Reset progress counters: last value reported, total and done so far.
    for key, value in (("lvr", 0), ("nrisk_total", total), ("nrisk_done", 0)):
        stats.pk_set(self.job.id, key, value)
    # Mirror the totals into the job statistics record.
    job_stats = models.JobStats.objects.get(oq_job=self.job)
    job_stats.num_sites = total
    job_stats.num_tasks = self.expected_tasks(self.block_size())
    job_stats.save()
def test_gc_clears_stats(self):
    # redis garbage collection should clear stats counters as well
    counters = (('nhzrd_total', 10), ('nhzrd_done', 7), ('nhzrd_failed', 3))
    for name, value in counters:
        stats.pk_set(self.test_job, name, value)
    # Sanity check:
    for name, value in counters:
        self.assertEqual(value, stats.pk_get(self.test_job, name))
    # 6 keys should be deleted, including the stats keys:
    self.assertEqual(6, kvs.cache_gc(self.test_job))
    # explicitly test that the stats keys are deleted
    for name, _ in counters:
        self.assertIsNone(stats.pk_get(self.test_job, name))