def test_workflow(self):
    """Exercise the disaggregation workflow.

    First check that ``pre_execute`` initializes the job statistics,
    then compute the prerequisite hazard curves synchronously and
    verify which of the generated disaggregation tasks actually invoke
    the hazardlib disaggregation function.
    """
    # `pre_execute` must populate the job stats for this calculation.
    self.calc.pre_execute()
    engine.save_job_stats(self.job)
    job_stats = models.JobStats.objects.get(oq_job=self.job.id)
    self.assertEqual(2, job_stats.num_sites)

    # Hazard curves are a prerequisite of the disagg function; compute
    # them in-process (no task distribution) so they are ready below.
    os.environ['OQ_NO_DISTRIBUTE'] = '1'
    try:
        self.calc.execute()
        self.calc.save_hazard_curves()
    finally:
        del os.environ['OQ_NO_DISTRIBUTE']

    args1, args2, args3, args4 = list(self.calc.disagg_task_arg_gen())

    base_path = 'openquake.engine.calculators.hazard.disaggregation.core'
    disagg_calc_func = (
        'openquake.hazardlib.calc.disagg.disaggregation'
    )
    with mock.patch(disagg_calc_func) as disagg_mock:
        disagg_mock.return_value = (None, None)
        with mock.patch('%s.%s' % (base_path, '_save_disagg_matrix')
                        ) as save_mock:
            # Some tasks compute nothing because their hazard curves are
            # all 0.0s: tasks 1 and 3 compute (2 poes * 2 imts * 1 site
            # = 4 calls each), tasks 2 and 4 are skipped.  The expected
            # values below are the *cumulative* disagg call counts after
            # each task runs; no matrix is produced by the mock, so the
            # save helper is never called.
            expected_counts = (4, 4, 8, 8)
            task_args = (args1, args2, args3, args4)
            for args, cumulative_calls in zip(task_args, expected_counts):
                core.compute_disagg.task_func(*args)
                self.assertEqual(cumulative_calls, disagg_mock.call_count)
                self.assertEqual(0, save_mock.call_count)
def test_complete_calculation_workflow(self):
    """Run the full calculator life cycle -- ``pre_execute``,
    ``execute``, ``post_execute``, ``post_process`` and ``clean_up`` --
    checking the database artifacts produced at each stage.
    """
    hc = self.job.hazard_calculation

    def assert_one_curve(**filters):
        # Exactly one hazard curve container should match `filters`.
        self.assertEqual(
            1, models.HazardCurve.objects.filter(**filters).count())

    self.calc.pre_execute()
    save_job_stats(self.job)

    # `pre_execute` must have recorded the number of sites of interest.
    job_stats = models.JobStats.objects.get(oq_job=self.job.id)
    self.assertEqual(120, job_stats.num_sites)

    # Move the job into the execution phase and run it.
    self.job.is_running = True
    self.job.status = 'executing'
    self.job.save()
    self.calc.execute()

    self.job.status = 'post_executing'
    self.job.save()
    self.calc.post_execute()

    realizations = models.LtRealization.objects.filter(
        hazard_calculation=self.job.hazard_calculation.id)
    self.assertEqual(2, len(realizations))

    # The htemp results must have been copied to their final location
    # in `hzrdr.hazard_curve` and `hzrdr.hazard_curve_data`.
    for rlz in realizations:
        [pga_curves] = models.HazardCurve.objects.filter(
            lt_realization=rlz.id, imt='PGA')
        [sa_curves] = models.HazardCurve.objects.filter(
            lt_realization=rlz.id, imt='SA', sa_period=0.025)

        # A multi-hazard-curve (imt=None) output exists per realization.
        assert_one_curve(
            lt_realization=rlz.id, imt=None, statistics=None)

        # 120 sites of interest -> exactly 120 curves per realization
        # per IMT.
        for container in (pga_curves, sa_curves):
            curve_data = models.HazardCurveData.objects.filter(
                hazard_curve=container.id)
            self.assertEqual(120, len(curve_data))

    # Post-processing produces the mean/quantile aggregate curves.
    self.job.status = 'post_processing'
    self.job.save()
    self.calc.post_process()

    # One mean curve per IMT, plus one multi-imt mean container.
    assert_one_curve(
        output__oq_job=self.job, lt_realization__isnull=True,
        statistics="mean", imt="PGA")
    assert_one_curve(
        output__oq_job=self.job, lt_realization__isnull=True,
        statistics="mean", imt="SA", sa_period=0.025)
    assert_one_curve(
        output__oq_job=self.job, lt_realization__isnull=True,
        statistics="mean", imt=None)

    # Likewise one curve per IMT (and one multi-imt container) for
    # each configured quantile level.
    for q in hc.quantile_hazard_curves:
        assert_one_curve(
            lt_realization__isnull=True, statistics="quantile",
            output__oq_job=self.job, quantile=q, imt="PGA")
        assert_one_curve(
            lt_realization__isnull=True, statistics="quantile",
            output__oq_job=self.job, quantile=q, imt="SA",
            sa_period=0.025)
        assert_one_curve(
            lt_realization__isnull=True, statistics="quantile",
            output__oq_job=self.job, quantile=q, imt=None)

    # Hazard map count:
    #   (num_poes * num_imts * num_rlzs)
    #   + (num_poes * num_imts * (1 mean + num_quantiles))
    #   = (2 * 2 * 2) + (2 * 2 * (1 + 2)) = 20
    self.assertEqual(
        20,
        models.HazardMap.objects.filter(output__oq_job=self.job).count())

    # UH Spectra count: num_hazard_maps_PGA_or_SA / num_poes
    #   = 20 / 2 = 10
    uhs_outputs = models.UHS.objects.filter(output__oq_job=self.job)
    self.assertEqual(10, uhs_outputs.count())
    # Each UH Spectra holds one curve per site of interest (120).
    for spectra in uhs_outputs:
        self.assertEqual(120, spectra.uhsdata_set.count())

    # Finally, clean up the intermediate calculation data.
    self.job.status = 'clean_up'
    self.job.save()
    self.calc.clean_up()
    self.assertEqual(0, len(self.calc.source_blocks_per_ltpath))