def setUp(self):
    """Build a two-job recipe fixture where 'Job 2' depends on 'Job 1'."""
    django.setup()

    self.job_1 = job_test_utils.create_job(status='RUNNING')
    self.job_2 = job_test_utils.create_job(data={})

    definition = {
        'version': '1.0',
        'input_data': [],
        'jobs': [{
            'name': 'Job 1',
            'job_type': {
                'name': self.job_1.job_type.name,
                'version': self.job_1.job_type.version,
            },
        }, {
            'name': 'Job 2',
            'job_type': {
                'name': self.job_2.job_type.name,
                'version': self.job_2.job_type.version,
            },
            'dependencies': [{'name': 'Job 1'}],
        }],
    }

    self.recipe_type = recipe_test_utils.create_recipe_type(definition=definition)
    self.recipe = recipe_test_utils.create_recipe(recipe_type=self.recipe_type)

    # NOTE(review): the second assignment overwrites the first, so
    # self.recipe_job ends up referencing the 'Job 2' link — preserved as-is.
    self.recipe_job = recipe_test_utils.create_recipe_job(recipe=self.recipe, job_name='Job 1', job=self.job_1)
    self.recipe_job = recipe_test_utils.create_recipe_job(recipe=self.recipe, job_name='Job 2', job=self.job_2)
def setUp(self):
    """Build a recipe fixture plus one standalone FAILED job outside the recipe."""
    django.setup()

    self.job_1 = job_test_utils.create_job(status="RUNNING")
    self.job_2 = job_test_utils.create_job(data={})
    # job_3 is not part of the recipe definition below.
    self.job_3 = job_test_utils.create_job(status="FAILED")

    definition = {
        "version": "1.0",
        "input_data": [],
        "jobs": [
            {
                "name": "Job 1",
                "job_type": {"name": self.job_1.job_type.name, "version": self.job_1.job_type.version},
            },
            {
                "name": "Job 2",
                "job_type": {"name": self.job_2.job_type.name, "version": self.job_2.job_type.version},
                "dependencies": [{"name": "Job 1"}],
            },
        ],
    }

    self.recipe_type = recipe_test_utils.create_recipe_type(definition=definition)
    self.recipe = recipe_test_utils.create_recipe(recipe_type=self.recipe_type)

    # NOTE(review): second assignment overwrites the first; self.recipe_job
    # references the 'Job 2' link after setUp — preserved as-is.
    self.recipe_job = recipe_test_utils.create_recipe_job(recipe=self.recipe, job_name="Job 1", job=self.job_1)
    self.recipe_job = recipe_test_utils.create_recipe_job(recipe=self.recipe, job_name="Job 2", job=self.job_2)
def setUp(self):
    """Create two job types in distinct categories, each with one job."""
    django.setup()

    self.job_type1 = job_test_utils.create_job_type(name="test1", version="1.0", category="test-1")
    self.job1 = job_test_utils.create_job(job_type=self.job_type1, status="RUNNING")

    self.job_type2 = job_test_utils.create_job_type(name="test2", version="1.0", category="test-2")
    self.job2 = job_test_utils.create_job(job_type=self.job_type2, status="PENDING")
def testJobTypesForAcessWithJustEnoughUsage(self):
    """Tests that a job type stays runnable when usage exactly consumes the allowed resources."""
    # NOTE(review): 'Acess' in the method name is a typo of 'Access'; kept
    # unchanged because the test runner discovers this method by name.
    first_job = job_test_utils.create_job(self.job_type_1)
    job_test_utils.create_job_exe(job=first_job)
    second_job = job_test_utils.create_job(self.job_type_1)
    job_test_utils.create_job_exe(job=second_job)

    runnable_job_types = SharedResource.objects.runnable_job_types(self.node_with_special_access)

    self.assertIn(self.job_type_1a, runnable_job_types)
def setUp(self):
    """Create two job types whose jobs carry execution histories, for latest-execution queries."""
    django.setup()

    self.job_type_1 = job_test_utils.create_job_type()
    self.job_type_2 = job_test_utils.create_job_type()

    # Job 1a: two failed attempts, one completed attempt, currently running.
    self.job_1a = job_test_utils.create_job(job_type=self.job_type_1, status="COMPLETED")
    job_test_utils.create_job_exe(
        job=self.job_1a, status="FAILED", created=timezone.now() - datetime.timedelta(hours=3)
    )
    time.sleep(0.01)  # keep auto-generated timestamps strictly ordered
    job_test_utils.create_job_exe(
        job=self.job_1a, status="FAILED", created=timezone.now() - datetime.timedelta(hours=2)
    )
    time.sleep(0.01)
    job_test_utils.create_job_exe(
        job=self.job_1a,
        status="COMPLETED",
        created=timezone.now() - datetime.timedelta(hours=1),
        last_modified=timezone.now() - datetime.timedelta(hours=1),
    )
    time.sleep(0.01)
    self.last_run_1a = job_test_utils.create_job_exe(job=self.job_1a, status="RUNNING")

    # Job 1b: a single failed execution.
    self.job_1b = job_test_utils.create_job(job_type=self.job_type_1, status="FAILED")
    time.sleep(0.01)
    self.last_run_1b = job_test_utils.create_job_exe(job=self.job_1b, status="FAILED")

    # Job 2a: mirrors 1a's history on the second job type.
    self.job_2a = job_test_utils.create_job(job_type=self.job_type_2, status="RUNNING")
    time.sleep(0.01)
    job_test_utils.create_job_exe(
        job=self.job_2a,
        status="FAILED",
        created=timezone.now() - datetime.timedelta(hours=3),
        last_modified=timezone.now() - datetime.timedelta(hours=2),
    )
    time.sleep(0.01)
    job_test_utils.create_job_exe(
        job=self.job_2a,
        status="FAILED",
        created=timezone.now() - datetime.timedelta(hours=2),
        last_modified=timezone.now() - datetime.timedelta(hours=1),
    )
    time.sleep(0.01)
    job_test_utils.create_job_exe(
        job=self.job_2a, status="COMPLETED", created=timezone.now() - datetime.timedelta(hours=1)
    )
    time.sleep(0.01)
    self.last_run_2a = job_test_utils.create_job_exe(job=self.job_2a, status="RUNNING")

    # Job 2b: a single completed execution.
    self.job_2b = job_test_utils.create_job(job_type=self.job_type_2, status="COMPLETED")
    time.sleep(0.01)
    self.last_run_2b = job_test_utils.create_job_exe(job=self.job_2b, status="COMPLETED")
def test_calculate_stats(self):
    """Tests calculating individual statistics for a metrics entry."""
    # NOTE(review): the jobs' 'ended' values are naive datetimes while the
    # execution timestamps are UTC-aware — preserved exactly as written.
    error = error_test_utils.create_error(is_builtin=True)

    job1 = job_test_utils.create_job(error=error, status='FAILED', ended=datetime.datetime(2015, 1, 1))
    job_test_utils.create_job_exe(
        job=job1,
        error=error,
        status=job1.status,
        queued=datetime.datetime(2015, 1, 1, tzinfo=timezone.utc),
        started=datetime.datetime(2015, 1, 1, 0, 10, 2, tzinfo=timezone.utc),
        pre_started=datetime.datetime(2015, 1, 1, 0, 30, 4, tzinfo=timezone.utc),
        pre_completed=datetime.datetime(2015, 1, 1, 1, 6, tzinfo=timezone.utc),
        job_started=datetime.datetime(2015, 1, 1, 1, 40, 8, tzinfo=timezone.utc),
        job_completed=datetime.datetime(2015, 1, 1, 2, 30, 10, tzinfo=timezone.utc),
        post_started=datetime.datetime(2015, 1, 1, 3, 30, 12, tzinfo=timezone.utc),
        post_completed=datetime.datetime(2015, 1, 1, 4, 40, 14, tzinfo=timezone.utc),
        ended=datetime.datetime(2015, 1, 1, 6, 0, 16, tzinfo=timezone.utc),
    )

    job2 = job_test_utils.create_job(error=error, status='FAILED', ended=datetime.datetime(2015, 1, 1))
    job_test_utils.create_job_exe(
        job=job2,
        error=error,
        status=job2.status,
        queued=datetime.datetime(2015, 1, 1, tzinfo=timezone.utc),
        started=datetime.datetime(2015, 1, 1, 2, 10, 2, tzinfo=timezone.utc),
        pre_started=datetime.datetime(2015, 1, 1, 4, 30, 4, tzinfo=timezone.utc),
        pre_completed=datetime.datetime(2015, 1, 1, 6, 0, 8, tzinfo=timezone.utc),
        job_started=datetime.datetime(2015, 1, 1, 8, 40, 14, tzinfo=timezone.utc),
        job_completed=datetime.datetime(2015, 1, 1, 10, 30, 22, tzinfo=timezone.utc),
        post_started=datetime.datetime(2015, 1, 1, 12, 30, 32, tzinfo=timezone.utc),
        post_completed=datetime.datetime(2015, 1, 1, 14, 40, 44, tzinfo=timezone.utc),
        ended=datetime.datetime(2015, 1, 1, 16, 0, 58, tzinfo=timezone.utc),
    )

    # One failure per remaining error category, each a distinct metrics entry.
    sys_error = error_test_utils.create_error(category='SYSTEM', is_builtin=True)
    job3a = job_test_utils.create_job(error=sys_error, status='FAILED', ended=datetime.datetime(2015, 1, 1))
    job_test_utils.create_job_exe(job=job3a, status=job3a.status, ended=job3a.ended, error=sys_error)

    data_error = error_test_utils.create_error(category='DATA', is_builtin=True)
    job3b = job_test_utils.create_job(error=data_error, status='FAILED', ended=datetime.datetime(2015, 1, 1))
    job_test_utils.create_job_exe(job=job3b, status=job3b.status, ended=job3b.ended, error=data_error)

    algo_error = error_test_utils.create_error(category='ALGORITHM', is_builtin=True)
    job3c = job_test_utils.create_job(error=algo_error, status='FAILED', ended=datetime.datetime(2015, 1, 1))
    job_test_utils.create_job_exe(job=job3c, status=job3c.status, ended=job3c.ended, error=algo_error)

    MetricsError.objects.calculate(datetime.date(2015, 1, 1))

    entries = MetricsError.objects.filter(occurred=datetime.date(2015, 1, 1))
    self.assertEqual(len(entries), 4)
    for entry in entries:
        self.assertEqual(entry.occurred, datetime.date(2015, 1, 1))
        # Two jobs failed with the first error; one each for the others.
        expected_total = 2 if entry.error == error else 1
        self.assertEqual(entry.total_count, expected_total)
def test_update_jobs_to_running(self):
    """Tests that job attributes are updated when a job is running."""
    first = job_test_utils.create_job(num_exes=1, started=timezone.now(), ended=timezone.now())
    second = job_test_utils.create_job(num_exes=1, started=timezone.now(), ended=timezone.now())
    when = timezone.now()

    updated_jobs = Job.objects.update_jobs_to_running([first.id, second.id], when)

    for job in updated_jobs:
        self.assertEqual(job.status, u'RUNNING')
        self.assertEqual(job.started, when)
        self.assertIsNone(job.ended)
        self.assertEqual(job.last_status_change, when)
def setUp(self):
    """Create two published products: one from an operational job type, one not."""
    django.setup()

    self.job_type1 = job_test_utils.create_job_type(name='test1', category='test-1', is_operational=True)
    self.job1 = job_test_utils.create_job(job_type=self.job_type1)
    self.job_exe1 = job_test_utils.create_job_exe(job=self.job1)
    self.product1 = product_test_utils.create_product(job_exe=self.job_exe1, has_been_published=True,
                                                      file_name='test.txt')

    self.job_type2 = job_test_utils.create_job_type(name='test2', category='test-2', is_operational=False)
    self.job2 = job_test_utils.create_job(job_type=self.job_type2)
    self.job_exe2 = job_test_utils.create_job_exe(job=self.job2)
    self.product2 = product_test_utils.create_product(job_exe=self.job_exe2, has_been_published=True)
def test_calculate_stats_partial(self):
    """Tests individual statistics are null when information is unavailable."""
    job_type = job_test_utils.create_job_type()
    job_test_utils.create_job(job_type=job_type, status='FAILED', ended=datetime.datetime(2015, 1, 1))
    job_test_utils.create_job(job_type=job_type, status='CANCELED', ended=datetime.datetime(2015, 1, 1))

    MetricsJobType.objects.calculate(datetime.date(2015, 1, 1))

    entries = MetricsJobType.objects.filter(occurred=datetime.date(2015, 1, 1))
    self.assertEqual(len(entries), 1)

    entry = entries.first()
    self.assertEqual(entry.occurred, datetime.date(2015, 1, 1))
    self.assertEqual(entry.completed_count, 0)
    self.assertEqual(entry.failed_count, 1)
    self.assertEqual(entry.canceled_count, 1)
    self.assertEqual(entry.total_count, 2)
    self.assertEqual(entry.error_system_count, 0)
    self.assertEqual(entry.error_data_count, 0)
    self.assertEqual(entry.error_algorithm_count, 0)

    # No executions were created, so every timing statistic must be null.
    for phase in ('queue', 'pre', 'job', 'post', 'run', 'stage'):
        for stat in ('sum', 'min', 'max', 'avg'):
            self.assertIsNone(getattr(entry, '%s_time_%s' % (phase, stat)))
def test_running(self):
    """Tests getting running jobs regardless of time filters."""
    old_when = datetime.datetime(2015, 1, 1, tzinfo=timezone.utc)
    job_test_utils.create_job(job_type=self.job_type, status="COMPLETED", last_status_change=old_when)
    job_test_utils.create_job(job_type=self.job_type, status="RUNNING", last_status_change=old_when)

    new_when = datetime.datetime(2015, 1, 10, tzinfo=timezone.utc)
    job_test_utils.create_job(job_type=self.job_type, status="COMPLETED", last_status_change=new_when)
    job_test_utils.create_job(job_type=self.job_type, status="RUNNING", last_status_change=new_when)

    # Only one COMPLETED job falls inside the window, but RUNNING jobs are
    # always included regardless of the started filter.
    response = self.client.generic("GET", "/job-types/status/?started=2015-01-05T00:00:00Z")
    result = json.loads(response.content)

    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(len(result["results"]), 1)
    job_counts = result["results"][0]["job_counts"]
    self.assertEqual(len(job_counts), 2)

    expected_counts = {"COMPLETED": 1, "RUNNING": 2}
    for entry in job_counts:
        if entry["status"] not in expected_counts:
            self.fail("Found unexpected job type count status: %s" % entry["status"])
        self.assertEqual(entry["count"], expected_counts[entry["status"]])
def test_update_status_running(self):
    """Tests that job attributes are updated when a job is running."""
    first = job_test_utils.create_job(num_exes=1, started=None, ended=timezone.now())
    second = job_test_utils.create_job(num_exes=1, started=None, ended=timezone.now())
    when = timezone.now()

    Job.objects.update_status([first, second], 'RUNNING', when)

    # Re-fetch from the database to verify the persisted state.
    for job in Job.objects.filter(id__in=[first.id, second.id]):
        self.assertEqual(job.status, 'RUNNING')
        self.assertEqual(job.started, when)
        self.assertIsNone(job.ended)
        self.assertEqual(job.last_status_change, when)
def test_get_available_based_on_usage(self):
    """Tests that scheduling skips a higher-priority job type whose resource usage is too high for the node."""
    # Consistency fix: both create_job calls now use keyword arguments (the
    # second previously passed job_type/event positionally).
    job1 = job_test_utils.create_job(job_type=self.job_type_3, event=self.trigger_event_1)
    job_test_utils.create_job_exe(job=job1)
    job2 = job_test_utils.create_job(job_type=self.job_type_3, event=self.trigger_event_1)
    job_test_utils.create_job_exe(job=job2)

    Queue.objects.queue_new_job(self.job_type_3, {}, self.trigger_event_1)
    Queue.objects.queue_new_job(self.job_type_4, {}, self.trigger_event_1)

    job_exes = Queue.objects.schedule_jobs_on_node(50, 500, 50, self.node_3)

    self.assertEqual(len(job_exes), 1)
    # We should see job type 4 despite 3 being a higher priority since 3 requires too many resources.
    # assertEqual gives a useful failure message, unlike assertTrue(a == b).
    self.assertEqual(job_exes[0].job.job_type, self.job_type_4)
def create_ingest(file_name='test.txt', status='TRANSFERRING', transfer_started=None, transfer_ended=None,
                  ingest_started=None, ingest_ended=None, data_started=None, data_ended=None, workspace=None,
                  strike=None, source_file=None):
    """Creates an ingest model for unit testing.

    Missing related models (workspace, strike, source file, job) are created on
    demand, and phase timestamps default to now() when the given status implies
    that phase has already started or ended.

    :returns: The ingest model
    :rtype: :class:`ingest.models.Ingest`
    """
    if not workspace:
        workspace = storage_test_utils.create_workspace()
    if not strike:
        strike = create_strike()
    if not source_file:
        source_file = source_test_utils.create_source(file_name=file_name, data_started=data_started,
                                                      data_ended=data_ended, workspace=workspace)
    if not transfer_started:
        transfer_started = timezone.now()
    # Once past the transfer phases the ingest must have started...
    if status not in ['QUEUED', 'TRANSFERRING'] and not ingest_started:
        ingest_started = timezone.now()
    # ...and once past ingesting it must have ended.
    if status not in ['QUEUED', 'TRANSFERRING', 'INGESTING'] and not ingest_ended:
        ingest_ended = timezone.now()

    # Best-effort lookup of the system ingest job type; fall back to a fresh
    # test job type. Fix: was a bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt.
    try:
        job_type = Ingest.objects.get_ingest_job_type()
    except Exception:
        job_type = job_utils.create_job_type()

    job = job_utils.create_job(job_type=job_type)
    job_utils.create_job_exe(job=job)

    return Ingest.objects.create(file_name=file_name, file_size=source_file.file_size, status=status, job=job,
                                 bytes_transferred=source_file.file_size, transfer_started=transfer_started,
                                 transfer_ended=transfer_ended, media_type='text/plain',
                                 ingest_started=ingest_started, ingest_ended=ingest_ended, workspace=workspace,
                                 strike=strike, source_file=source_file)
def create_queue(job_type=None, priority=1, cpus_required=1.0, mem_required=512.0, disk_in_required=200.0,
                 disk_out_required=100.0, disk_total_required=300.0):
    """Creates a queue model for unit testing

    :param job_type: The job type
    :type job_type: :class:`job.models.JobType`
    :param priority: The priority
    :type priority: int
    :param cpus_required: The number of CPUs required
    :type cpus_required: float
    :param mem_required: The memory required in MiB
    :type mem_required: float
    :param disk_in_required: The input disk space required in MiB
    :type disk_in_required: float
    :param disk_out_required: The output disk space required in MiB
    :type disk_out_required: float
    :param disk_total_required: The total disk space required in MiB
    :type disk_total_required: float
    :returns: The queue model
    :rtype: :class:`queue.models.Queue`
    """
    # Fix: the cpus_required doc incorrectly described CPUs as "in MiB";
    # CPUs are a count, not a memory quantity. Behavior unchanged.
    job = job_test_utils.create_job(job_type=job_type, status='QUEUED')
    job_exe = job_test_utils.create_job_exe(job=job, status='QUEUED')

    return Queue.objects.create(job_exe=job_exe, job=job, job_type=job.job_type, priority=priority,
                                cpus_required=cpus_required, mem_required=mem_required,
                                disk_in_required=disk_in_required, disk_out_required=disk_out_required,
                                disk_total_required=disk_total_required, queued=timezone.now())
def create_recipe_job(recipe=None, job_name=None, job=None):
    """Creates a recipe job model for unit testing

    :param recipe: The associated recipe
    :type recipe: :class:'recipe.models.Recipe'
    :param job_name: The associated name for the recipe job
    :type job_name: string
    :param job: The associated job
    :type job: :class:'job.models.Job'
    :returns: The recipe job model
    :rtype: :class:`recipe.models.RecipeJob`
    """
    # Fix: docstring previously claimed this creates "a job type model";
    # it actually creates the RecipeJob link between a recipe and a job.
    if not recipe:
        recipe = create_recipe()
    if not job_name:
        job_name = 'Test Job Name'
    if not job:
        job = job_test_utils.create_job()

    recipe_job = RecipeJob()
    recipe_job.job_name = job_name
    recipe_job.job = job
    recipe_job.recipe = recipe
    recipe_job.save()
    return recipe_job
def testResourceRemainingReduceByJob(self):
    """Tests that a running job's usage is subtracted from the remaining shared resource."""
    running_job = job_test_utils.create_job(self.job_type_1)
    job_test_utils.create_job_exe(job=running_job)

    remaining = SharedResource.objects.get_resource_remaining(self.resource_1)

    self.assertEqual(remaining, RESOURCE_LIMIT - JOB_TYPE_1_USAGE)
def test_update_status_pending(self):
    """Tests that job attributes are updated when a job is pending."""
    job = job_test_utils.create_job(num_exes=1, started=timezone.now(), ended=timezone.now())

    Job.objects.update_status(job, u'PENDING', timezone.now())

    self.assertEqual(job.status, u'PENDING')
def testResourceRemainingUnrelatedJob(self):
    """Tests that a job which does not use a resource leaves its remaining amount untouched."""
    unrelated_job = job_test_utils.create_job(self.job_type_1)
    job_test_utils.create_job_exe(job=unrelated_job)

    remaining = SharedResource.objects.get_resource_remaining(self.resource_2)

    self.assertEqual(remaining, RESOURCE_LIMIT)
def test_update_status_blocked(self):
    """Tests that job attributes are updated when a job is blocked."""
    job = job_test_utils.create_job(num_exes=1, started=timezone.now(), ended=timezone.now())

    Job.objects.update_status(job, u'BLOCKED', timezone.now())

    self.assertEqual(job.status, u'BLOCKED')
def setUp(self):
    """Create a single-try job type with one RUNNING execution and remember its id."""
    django.setup()

    job_type = job_test_utils.create_job_type(max_tries=1)
    job = job_test_utils.create_job(job_type=job_type, num_exes=1)
    job_exe = job_test_utils.create_job_exe(job=job, status='RUNNING')
    self._job_exe_id = job_exe.id
def test_update_status_completed(self):
    """Tests that job attributes are updated when a job is completed."""
    job = job_test_utils.create_job(num_exes=1, started=timezone.now(), ended=timezone.now())

    Job.objects.update_status([job], 'COMPLETED', timezone.now())

    self.assertEqual(job.status, 'COMPLETED')
    self.assertIsNotNone(job.ended)
def test_update_status_canceled(self):
    """Tests that job attributes are updated when a job is canceled."""
    job = job_test_utils.create_job(num_exes=1, started=timezone.now(), ended=timezone.now())

    Job.objects.update_status(job, u'CANCELED', timezone.now())

    self.assertEqual(job.status, u'CANCELED')
    self.assertIsNotNone(job.ended)
def setUp(self):
    """Initialize the scheduler and create a single-try job with one RUNNING execution."""
    django.setup()

    Scheduler.objects.initialize_scheduler()

    job_type = job_test_utils.create_job_type(max_tries=1)
    job = job_test_utils.create_job(job_type=job_type, num_exes=1)
    job_exe = job_test_utils.create_job_exe(job=job, status='RUNNING')
    self._job_exe_id = job_exe.id
def setUp(self):
    """Create two jobs that both take the same stored file as input."""
    django.setup()

    self.file = storage_test_utils.create_file()

    def _input_data():
        # Fresh dict per job so neither job can mutate the other's input.
        return {"input_data": [{"name": "input_file", "file_id": self.file.id}]}

    self.job_type1 = job_test_utils.create_job_type(name="test1", category="test-1")
    self.job1 = job_test_utils.create_job(job_type=self.job_type1, status="RUNNING", data=_input_data())

    self.job_type2 = job_test_utils.create_job_type(name="test2", category="test-2")
    self.job2 = job_test_utils.create_job(job_type=self.job_type2, status="PENDING", data=_input_data())
def setUp(self):
    """Create jobs with varied execution histories across two job types."""
    django.setup()

    self.job_type_1 = job_test_utils.create_job_type()
    self.job_type_2 = job_test_utils.create_job_type()

    # Job 1: three failed attempts then a running one; individual exes kept
    # on self for assertions.
    self.job_1 = job_test_utils.create_job(job_type=self.job_type_1, status="COMPLETED")
    self.job_exe_1a = job_test_utils.create_job_exe(
        job=self.job_1, status="FAILED", created=timezone.now() - datetime.timedelta(hours=3)
    )
    self.job_exe_1b = job_test_utils.create_job_exe(
        job=self.job_1, status="FAILED", created=timezone.now() - datetime.timedelta(hours=2)
    )
    self.job_exe_1c = job_test_utils.create_job_exe(
        job=self.job_1,
        status="FAILED",
        created=timezone.now() - datetime.timedelta(hours=1),
        last_modified=timezone.now() - datetime.timedelta(hours=1),
    )
    self.last_exe_1 = job_test_utils.create_job_exe(job=self.job_1, status="RUNNING")

    # Job 2: a single failed execution.
    self.job_2 = job_test_utils.create_job(job_type=self.job_type_1, status="FAILED")
    self.last_exe_2 = job_test_utils.create_job_exe(job=self.job_2, status="FAILED")

    # Jobs 3 and 4 belong to the second job type; their exes are not kept.
    job_3 = job_test_utils.create_job(job_type=self.job_type_2, status="RUNNING")
    job_test_utils.create_job_exe(
        job=job_3,
        status="FAILED",
        created=timezone.now() - datetime.timedelta(hours=3),
        last_modified=timezone.now() - datetime.timedelta(hours=2),
    )
    job_test_utils.create_job_exe(
        job=job_3,
        status="FAILED",
        created=timezone.now() - datetime.timedelta(hours=2),
        last_modified=timezone.now() - datetime.timedelta(hours=1),
    )
    job_test_utils.create_job_exe(
        job=job_3, status="COMPLETED", created=timezone.now() - datetime.timedelta(hours=1)
    )
    job_test_utils.create_job_exe(job=job_3, status="RUNNING")

    job_4 = job_test_utils.create_job(job_type=self.job_type_2, status="COMPLETED")
    job_test_utils.create_job_exe(job=job_4, status="COMPLETED")
def setUp(self):
    """Create eight generic jobs, exposed as self.job_a through self.job_h."""
    django.setup()
    for suffix in 'abcdefgh':
        setattr(self, 'job_%s' % suffix, job_test_utils.create_job())
def test_calculate_repeated(self):
    """Tests regenerating metrics for a date that already has metrics."""
    occurred = datetime.date(2015, 1, 1)
    job = job_test_utils.create_job(status='COMPLETED', ended=datetime.datetime(2015, 1, 1))
    job_test_utils.create_job_exe(job=job, status=job.status, ended=job.ended)

    MetricsJobType.objects.calculate(occurred)
    MetricsJobType.objects.calculate(occurred)  # the second run must replace, not duplicate

    entries = MetricsJobType.objects.filter(occurred=occurred)
    self.assertEqual(len(entries), 1)
def test_queue_job_timestamps(self):
    """Tests that job attributes are updated when a job is queued."""
    job = job_test_utils.create_job(num_exes=1, started=timezone.now(), ended=timezone.now())

    Job.objects.queue_job(job, None, timezone.now())

    self.assertEqual(job.status, u'QUEUED')
    self.assertIsNotNone(job.queued)
    # Queuing resets the execution timestamps.
    self.assertIsNone(job.started)
    self.assertIsNone(job.ended)
def test_order_by(self):
    """Tests successfully calling the jobs view with sorting."""
    job_type1b = job_test_utils.create_job_type(name="test1", version="2.0", category="test-1")
    job_test_utils.create_job(job_type=job_type1b, status="RUNNING")
    job_type1c = job_test_utils.create_job_type(name="test1", version="3.0", category="test-1")
    job_test_utils.create_job(job_type=job_type1c, status="RUNNING")

    response = self.client.generic("GET", "/jobs/?order=job_type__name&order=-job_type__version")
    result = json.loads(response.content)

    self.assertEqual(response.status_code, status.HTTP_200_OK)
    results = result["results"]
    self.assertEqual(len(results), 4)

    # Sorted by name ascending, then version descending.
    expected_type_ids = [job_type1c.id, job_type1b.id, self.job_type1.id, self.job_type2.id]
    for index, type_id in enumerate(expected_type_ids):
        self.assertEqual(results[index]["job_type"]["id"], type_id)
def setUp(self):
    """Create two job types with complementary file/files interfaces and a plain-text file."""
    django.setup()

    # Job type 1: single plain-text file in, multiple PNG files out.
    self.input_name_1 = 'Test Input 1'
    self.output_name_1 = 'Test Output 1'
    interface_1 = {
        'version': '1.0',
        'command': 'my_cmd',
        'command_arguments': 'args',
        'input_data': [{
            'name': self.input_name_1,
            'type': 'file',
            'media_types': ['text/plain'],
        }],
        'output_data': [{
            'name': self.output_name_1,
            'type': 'files',
            'media_type': 'image/png',
        }],
    }
    self.job_type_1 = job_test_utils.create_job_type(interface=interface_1)
    self.job_1 = job_test_utils.create_job(job_type=self.job_type_1)

    # Job type 2: multiple image files in, single file out.
    self.input_name_2 = 'Test Input 2'
    self.output_name_2 = 'Test Output 2'
    interface_2 = {
        'version': '1.0',
        'command': 'my_cmd',
        'command_arguments': 'args',
        'input_data': [{
            'name': self.input_name_2,
            'type': 'files',
            'media_types': ['image/png', 'image/tiff'],
        }],
        'output_data': [{
            'name': self.output_name_2,
            'type': 'file',
        }],
    }
    self.job_type_2 = job_test_utils.create_job_type(interface=interface_2)
    self.job_2 = job_test_utils.create_job(job_type=self.job_type_2)

    self.file_1 = storage_test_utils.create_file(media_type='text/plain')
def test_calculate_stats_partial(self):
    """Tests individual statistics are null when information is unavailable."""
    occurred = datetime.datetime(2015, 1, 1, tzinfo=utc)
    job_type = job_test_utils.create_seed_job_type()
    job_test_utils.create_job(job_type=job_type, status='FAILED', ended=occurred)
    job_test_utils.create_job(job_type=job_type, status='CANCELED', ended=occurred)

    MetricsJobType.objects.calculate(occurred)

    entries = MetricsJobType.objects.filter(occurred=occurred)
    self.assertEqual(len(entries), 1)

    entry = entries.first()
    self.assertEqual(entry.occurred, occurred)
    self.assertEqual(entry.completed_count, 0)
    self.assertEqual(entry.failed_count, 1)
    self.assertEqual(entry.canceled_count, 1)
    self.assertEqual(entry.total_count, 2)
    self.assertEqual(entry.error_system_count, 0)
    self.assertEqual(entry.error_data_count, 0)
    self.assertEqual(entry.error_algorithm_count, 0)

    # No executions exist, so every timing statistic must be null.
    for phase in ('queue', 'pre', 'job', 'post', 'run', 'stage'):
        for stat in ('sum', 'min', 'max', 'avg'):
            self.assertIsNone(getattr(entry, '%s_time_%s' % (phase, stat)))
def test_process_job_input(self):
    """Tests calling JobManager.process_job_input()"""
    # Source time extents: the extremes below must win the min/max rollup.
    base_date = timezone.now()
    min_src_started_job_1 = base_date - datetime.timedelta(days=200)
    max_src_ended_job_1 = base_date + datetime.timedelta(days=200)
    date_2 = base_date + datetime.timedelta(minutes=30)
    date_3 = base_date + datetime.timedelta(minutes=40)
    date_4 = base_date + datetime.timedelta(minutes=50)
    min_src_started_job_2 = base_date - datetime.timedelta(days=500)
    max_src_ended_job_2 = base_date + datetime.timedelta(days=500)

    s_class = 'A'
    s_sensor = '1'
    collection = '12345'
    task = 'abcd'

    workspace = storage_test_utils.create_workspace()

    # Input files for job 1.
    file_1 = storage_test_utils.create_file(workspace=workspace, file_size=10485760.0,
                                            source_sensor_class=s_class, source_sensor=s_sensor,
                                            source_collection=collection, source_task=task)
    file_2 = storage_test_utils.create_file(workspace=workspace, file_size=104857600.0,
                                            source_started=date_2, source_ended=date_3,
                                            source_sensor_class=s_class, source_sensor=s_sensor,
                                            source_collection=collection, source_task=task)
    file_3 = storage_test_utils.create_file(workspace=workspace, file_size=987654321.0,
                                            source_started=min_src_started_job_1, source_ended=date_4)
    file_4 = storage_test_utils.create_file(workspace=workspace, file_size=46546.0,
                                            source_ended=max_src_ended_job_1)
    file_5 = storage_test_utils.create_file(workspace=workspace, file_size=83457.0, source_started=date_2)

    # Input files for job 2.
    file_6 = storage_test_utils.create_file(workspace=workspace, file_size=42126588636633.0,
                                            source_ended=date_4)
    file_7 = storage_test_utils.create_file(workspace=workspace, file_size=76645464662354.0)
    file_8 = storage_test_utils.create_file(workspace=workspace, file_size=4654.0,
                                            source_started=min_src_started_job_2)
    file_9 = storage_test_utils.create_file(workspace=workspace, file_size=545.0,
                                            source_started=date_3, source_ended=max_src_ended_job_2)
    file_10 = storage_test_utils.create_file(workspace=workspace, file_size=0.154, source_ended=date_4,
                                             source_sensor_class=s_class, source_sensor=s_sensor,
                                             source_collection=collection, source_task=task)

    interface = {
        'command': 'my_command',
        'inputs': {
            'files': [{
                'name': 'Input 1',
                'mediaTypes': ['text/plain'],
            }, {
                'name': 'Input 2',
                'mediaTypes': ['text/plain'],
            }]
        },
        'outputs': {
            'files': [{
                'name': 'Output 1',
                'mediaType': 'image/png',
            }]
        }
    }
    job_type = job_test_utils.create_seed_job_type(interface=interface)

    data_1 = {
        'version': '1.0',
        'input_data': [{
            'name': 'Input 1',
            'file_id': file_1.id
        }, {
            'name': 'Input 2',
            'file_ids': [file_2.id, file_3.id, file_4.id, file_5.id]
        }],
        'output_data': [{
            'name': 'Output 1',
            'workspace_id': workspace.id
        }]
    }
    data_2 = {
        'version': '1.0',
        'input_data': [{
            'name': 'Input 1',
            'file_id': file_6.id
        }, {
            'name': 'Input 2',
            'file_ids': [file_7.id, file_8.id, file_9.id, file_10.id]
        }],
        'output_data': [{
            'name': 'Output 1',
            'workspace_id': workspace.id
        }]
    }

    job_1 = job_test_utils.create_job(job_type=job_type, num_exes=0, status='PENDING',
                                      input_file_size=None, input=data_1)
    job_2 = job_test_utils.create_job(job_type=job_type, num_exes=0, status='PENDING',
                                      input_file_size=None, input=data_2)

    # Execute method
    Job.objects.process_job_input(job_1)
    Job.objects.process_job_input(job_2)

    # Retrieve updated job models
    jobs = Job.objects.filter(id__in=[job_1.id, job_2.id]).order_by('id')
    job_1 = jobs[0]
    job_2 = jobs[1]

    # Check jobs for expected fields
    self.assertEqual(job_1.input_file_size, 1053.0)
    self.assertEqual(job_1.source_started, min_src_started_job_1)
    self.assertEqual(job_1.source_ended, max_src_ended_job_1)
    self.assertEqual(job_1.source_sensor_class, s_class)
    self.assertEqual(job_1.source_sensor, s_sensor)
    self.assertEqual(job_1.source_collection, collection)
    self.assertEqual(job_1.source_task, task)

    self.assertEqual(job_2.input_file_size, 113269857.0)
    self.assertEqual(job_2.source_started, min_src_started_job_2)
    self.assertEqual(job_2.source_ended, max_src_ended_job_2)
    self.assertEqual(job_2.source_sensor_class, s_class)
    self.assertEqual(job_2.source_sensor, s_sensor)
    self.assertEqual(job_2.source_collection, collection)
    self.assertEqual(job_2.source_task, task)

    # Make sure job input file models are created
    def _input_file_map(job_id):
        files = JobInputFile.objects.filter(job_id=job_id)
        self.assertEqual(len(files), 5)
        mapping = {'Input 1': set(), 'Input 2': set()}
        for input_file in files:
            mapping[input_file.job_input].add(input_file.input_file_id)
        return mapping

    self.assertDictEqual(_input_file_map(job_1.id), {
        'Input 1': {file_1.id},
        'Input 2': {file_2.id, file_3.id, file_4.id, file_5.id},
    })
    self.assertDictEqual(_input_file_map(job_2.id), {
        'Input 1': {file_6.id},
        'Input 2': {file_7.id, file_8.id, file_9.id, file_10.id},
    })
def setUp(self):
    """Builds standalone jobs plus two recipes whose jobs will be re-queued."""
    django.setup()

    empty_input = convert_data_to_v6_json(Data()).get_dict()
    self.new_priority = 200

    # Standalone jobs in assorted terminal states
    self.standalone_failed_job = job_test_utils.create_job(status='FAILED', input=empty_input,
                                                           num_exes=3, priority=100)
    self.standalone_superseded_job = job_test_utils.create_job(status='FAILED', input=empty_input, num_exes=1)
    self.standalone_canceled_job = job_test_utils.create_job(status='CANCELED', input=empty_input,
                                                             num_exes=1, priority=100)
    self.standalone_completed_job = job_test_utils.create_job(status='COMPLETED', input=empty_input)
    Job.objects.supersede_jobs_old([self.standalone_superseded_job], now())

    # Create recipe for re-queing a job that should now be PENDING (and its dependencies)
    type_a_1 = job_test_utils.create_job_type()
    type_a_2 = job_test_utils.create_job_type()
    recipe_def_a = {
        'version': '1.0',
        'input_data': [],
        'jobs': [{
            'name': 'Job 1',
            'job_type': {'name': type_a_1.name, 'version': type_a_1.version},
        }, {
            'name': 'Job 2',
            'job_type': {'name': type_a_2.name, 'version': type_a_2.version},
            'dependencies': [{'name': 'Job 1'}],
        }],
    }
    recipe_type_a = recipe_test_utils.create_recipe_type(definition=recipe_def_a)
    recipe_input_a = {'version': '1.0', 'input_data': [], 'workspace_id': 1}
    recipe_a = recipe_test_utils.create_recipe(recipe_type=recipe_type_a, input=recipe_input_a)
    self.job_a_1 = job_test_utils.create_job(job_type=type_a_1, status='FAILED', input=empty_input,
                                             num_exes=1, recipe=recipe_a)
    self.job_a_2 = job_test_utils.create_job(job_type=type_a_2, status='BLOCKED', recipe=recipe_a)
    recipe_test_utils.create_recipe_job(recipe=recipe_a, job_name='Job 1', job=self.job_a_1)
    recipe_test_utils.create_recipe_job(recipe=recipe_a, job_name='Job 2', job=self.job_a_2)

    # Create recipe for re-queing a job that should now be BLOCKED (and its dependencies)
    type_b_1 = job_test_utils.create_job_type()
    type_b_2 = job_test_utils.create_job_type()
    type_b_3 = job_test_utils.create_job_type()
    recipe_def_b = {
        'version': '1.0',
        'input_data': [],
        'jobs': [{
            'name': 'Job 1',
            'job_type': {'name': type_b_1.name, 'version': type_b_1.version},
        }, {
            'name': 'Job 2',
            'job_type': {'name': type_b_2.name, 'version': type_b_2.version},
            'dependencies': [{'name': 'Job 1'}],
        }, {
            'name': 'Job 3',
            'job_type': {'name': type_b_3.name, 'version': type_b_3.version},
            'dependencies': [{'name': 'Job 2'}],
        }],
    }
    recipe_type_b = recipe_test_utils.create_recipe_type(definition=recipe_def_b)
    recipe_input_b = {'version': '1.0', 'input_data': [], 'workspace_id': 1}
    recipe_b = recipe_test_utils.create_recipe(recipe_type=recipe_type_b, input=recipe_input_b)
    self.job_b_1 = job_test_utils.create_job(job_type=type_b_1, status='FAILED', input=empty_input,
                                             recipe=recipe_b)
    self.job_b_2 = job_test_utils.create_job(job_type=type_b_2, status='CANCELED', num_exes=0, recipe=recipe_b)
    self.job_b_3 = job_test_utils.create_job(job_type=type_b_3, status='BLOCKED', num_exes=0, recipe=recipe_b)
    recipe_test_utils.create_recipe_job(recipe=recipe_b, job_name='Job 1', job=self.job_b_1)
    recipe_test_utils.create_recipe_job(recipe=recipe_b, job_name='Job 2', job=self.job_b_2)
    recipe_test_utils.create_recipe_job(recipe=recipe_b, job_name='Job 3', job=self.job_b_3)

    # Job IDs to re-queue
    self.job_ids = [self.standalone_failed_job.id, self.standalone_canceled_job.id,
                    self.standalone_completed_job.id, self.job_a_1.id, self.job_b_2.id]
def setUp(self):
    """Creates two seed job types, two recipe types, and six days of recipes/jobs for timeline queries."""
    django.setup()
    rest.login_client(self.client, is_staff=True)

    def single_file_inputs():
        # Fresh dict each call so the two manifests do not share nested state
        return {'files': [{'name': 'INPUT_FILE', 'required': True,
                           'mediaTypes': ['image/png'], 'partial': False}]}

    def recipe_input():
        # Fresh dict each call so the two recipe definitions do not share nested state
        return {'files': [{'name': 'INPUT_FILE', 'media_types': ['image/png'],
                           'required': True, 'multiple': False}],
                'json': []}

    def job_node(job_type):
        """Builds a recipe-definition job node fed directly from the recipe input"""
        return {
            'dependencies': [],
            'input': {'INPUT_FILE': {'type': 'recipe', 'input': 'INPUT_FILE'}},
            'node_type': {
                'node_type': 'job',
                'job_type_name': job_type.name,
                'job_type_version': job_type.version,
                'job_type_revision': job_type.revision_num,
            },
        }

    # create a couple job types
    manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)
    manifest['job']['name'] = 'test-job-1'
    manifest['job']['interface']['inputs'] = single_file_inputs()
    self.job_type_1 = job_test_utils.create_seed_job_type(manifest=manifest)

    manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)
    manifest['job']['name'] = 'test-job-2'
    manifest['job']['interface']['inputs'] = single_file_inputs()
    self.job_type_2 = job_test_utils.create_seed_job_type(manifest=manifest)

    # create recipe types: recipe 1 runs job type 1; recipe 2 runs job types 2 and 1
    self.recipe_type_1 = recipe_test_utils.create_recipe_type_v6(definition={
        'version': '7',
        'input': recipe_input(),
        'nodes': {'node_a': job_node(self.job_type_1)},
    })
    self.recipe_type_2 = recipe_test_utils.create_recipe_type_v6(definition={
        'version': '7',
        'input': recipe_input(),
        'nodes': {'node_a': job_node(self.job_type_2), 'node_b': job_node(self.job_type_1)},
    })

    # create recipes & jobs, one set per day
    self.workspace = storage_test_utils.create_workspace()
    for day in range(1, 7):
        date_1 = datetime.datetime(2020, 1, day, tzinfo=utc)
        date_2 = datetime.datetime(2020, 1, day + 1, tzinfo=utc)
        date_3 = datetime.datetime(2020, day, day + 1, tzinfo=utc)
        source_file = storage_test_utils.create_file(workspace=self.workspace, file_size=104857600.0,
                                                     source_started=date_1, source_ended=date_2)
        input_data = {
            'version': '1.0',
            'input_data': [{'name': 'INPUT_FILE', 'file_id': source_file.id}],
        }

        # Recipe 1's jobs
        recipe_1 = recipe_test_utils.create_recipe(recipe_type=self.recipe_type_1, input=input_data)
        job_1 = job_test_utils.create_job(job_type=self.job_type_1, status='COMPLETED',
                                          started=date_1, ended=date_1)
        job_1.recipe_id = recipe_1.id
        job_1.save()

        # Recipe 2's jobs
        recipe_2 = recipe_test_utils.create_recipe(recipe_type=self.recipe_type_2, input=input_data)
        job_2 = job_test_utils.create_job(job_type=self.job_type_2, status='COMPLETED',
                                          started=date_2, ended=date_2)
        job_2.recipe_id = recipe_2.id
        job_2.save()
        job_3 = job_test_utils.create_job(job_type=self.job_type_1, status='COMPLETED',
                                          started=date_3, ended=date_3)
        job_3.recipe_id = recipe_2.id
        job_3.save()
def test_calculate_job_type(self):
    """Tests calculating job load grouping by job type."""
    job_type1 = job_test_utils.create_job_type()
    job_test_utils.create_job(job_type=job_type1, status='PENDING')

    job_type2 = job_test_utils.create_job_type()
    for _ in range(2):
        job_test_utils.create_job(job_type=job_type2, status='QUEUED')

    job_type3 = job_test_utils.create_job_type()
    for _ in range(3):
        job_test_utils.create_job(job_type=job_type3, status='RUNNING')

    JobLoad.objects.calculate()

    results = JobLoad.objects.all()
    self.assertEqual(len(results), 3)

    # Expected (pending, queued, running, total) counts per job type
    expected_counts = {
        job_type1.id: (1, 0, 0, 1),
        job_type2.id: (0, 2, 0, 2),
        job_type3.id: (0, 0, 3, 3),
    }
    for result in results:
        if result.job_type_id not in expected_counts:
            self.fail('Found unexpected job type: %i' % result.job_type_id)
        pending, queued, running, total = expected_counts[result.job_type_id]
        self.assertEqual(result.pending_count, pending)
        self.assertEqual(result.queued_count, queued)
        self.assertEqual(result.running_count, running)
        self.assertEqual(result.total_count, total)
def test_execute(self):
    """Tests calling CompletedJobs.execute() successfully"""
    job_1 = job_test_utils.create_job(num_exes=1, status='QUEUED')
    job_test_utils.create_job_exe(job=job_1)
    job_2 = job_test_utils.create_job(num_exes=1, status='RUNNING')
    job_test_utils.create_job_exe(job=job_2, output=JobResults())
    job_3 = job_test_utils.create_job(num_exes=0, status='PENDING')
    job_ids = [job_1.id, job_2.id, job_3.id]
    from recipe.test import utils as recipe_test_utils
    recipe_1 = recipe_test_utils.create_recipe()
    recipe_test_utils.create_recipe_job(recipe=recipe_1, job=job_2)
    when_ended = now()

    # Add jobs to message
    message = CompletedJobs()
    message.ended = when_ended
    for job in (job_1, job_2, job_3):
        if message.can_fit_more():
            message.add_completed_job(CompletedJob(job.id, job.num_exes))

    def find_new_messages(msgs):
        """Returns the (update_recipes, publish_job, update_recipe_metrics) messages, None when absent"""
        recipes_msg = publish_msg = metrics_msg = None
        for msg in msgs:
            if msg.type == 'update_recipes':
                recipes_msg = msg
            elif msg.type == 'publish_job':
                publish_msg = msg
            elif msg.type == 'update_recipe_metrics':
                metrics_msg = msg
        return recipes_msg, publish_msg, metrics_msg

    def check_results(msg):
        """Asserts the expected job models and follow-on messages after executing *msg*"""
        jobs = Job.objects.filter(id__in=job_ids).order_by('id')
        self.assertEqual(len(msg.new_messages), 3)
        update_recipes_msg, publish_job_msg, update_recipe_metrics_msg = find_new_messages(msg.new_messages)
        self.assertIsNotNone(update_recipes_msg)
        self.assertIsNotNone(publish_job_msg)
        self.assertIsNotNone(update_recipe_metrics_msg)
        # Job 2 was only job both completed and with output
        self.assertEqual(len(update_recipes_msg._recipe_ids), 1)
        self.assertEqual(publish_job_msg.job_id, job_2.id)
        # Job 1 should be completed
        self.assertEqual(jobs[0].status, 'COMPLETED')
        self.assertEqual(jobs[0].num_exes, 1)
        self.assertEqual(jobs[0].ended, when_ended)
        # Job 2 should be completed and has output, so should be in update_recipes message
        self.assertEqual(jobs[1].status, 'COMPLETED')
        self.assertEqual(jobs[1].num_exes, 1)
        self.assertEqual(jobs[1].ended, when_ended)
        self.assertTrue(recipe_1.id in update_recipes_msg._recipe_ids)
        # Job 3 should ignore update
        self.assertEqual(jobs[2].status, 'PENDING')
        self.assertEqual(jobs[2].num_exes, 0)

    # Execute message
    result = message.execute()
    self.assertTrue(result)
    check_results(message)

    # Test executing message again; ended must not move off when_ended on re-execution
    new_ended = when_ended + datetime.timedelta(minutes=5)
    message_json_dict = message.to_json()
    message = CompletedJobs.from_json(message_json_dict)
    message.ended = new_ended
    result = message.execute()
    self.assertTrue(result)

    # Should have the same messages and models as before
    check_results(message)
def test_calculate_filtered(self):
    """Tests generating metrics with only certain job executions."""
    occurred = datetime.datetime(2015, 1, 1, tzinfo=utc)
    statuses = ('QUEUED', 'RUNNING', 'FAILED', 'COMPLETED', 'CANCELED')

    # Jobs with no execution and no ended time — should be filtered out of the metrics
    for job_status in statuses:
        job_test_utils.create_job(status=job_status)

    # Jobs with a matching execution that ended on the target day
    for job_status in statuses:
        job = job_test_utils.create_job(status=job_status, ended=occurred)
        job_test_utils.create_job_exe(job=job, status=job.status, ended=job.ended)

    MetricsJobType.objects.calculate(occurred)

    entries = MetricsJobType.objects.filter(occurred=occurred)
    self.assertEqual(len(entries), 3)
def test_type_versions(self):
    """Tests calling /timeline/job-types filtered by job version types"""
    # New job type that differs from job_type_1 only by its job version
    manifest = copy.deepcopy(self.job_type_1.manifest)
    manifest['job']['jobVersion'] = '1.1.1'
    job_type = job_test_utils.create_seed_job_type(manifest=manifest)

    # Point recipe type 1 at the new job type version
    recipe_def = {
        'version': '7',
        'input': {
            'files': [{'name': 'INPUT_FILE', 'media_types': ['image/png'],
                       'required': True, 'multiple': False}],
            'json': [],
        },
        'nodes': {
            'node_a': {
                'dependencies': [],
                'input': {'INPUT_FILE': {'type': 'recipe', 'input': 'INPUT_FILE'}},
                'node_type': {
                    'node_type': 'job',
                    'job_type_name': job_type.name,
                    'job_type_version': job_type.version,
                    'job_type_revision': job_type.revision_num,
                },
            },
        },
    }
    recipe_test_utils.edit_recipe_type_v6(self.recipe_type_1, definition=recipe_def, auto_update=False)
    recipe_edited = RecipeType.objects.get(id=self.recipe_type_1.id)

    # One completed job per day, each in its own recipe
    for day in range(1, 7):
        day_start = datetime.datetime(2020, 1, day, tzinfo=utc)
        day_end = datetime.datetime(2020, 1, day + 1, tzinfo=utc)
        source_file = storage_test_utils.create_file(workspace=self.workspace, file_size=104857600.0,
                                                     source_started=day_start, source_ended=day_end)
        input_data = {
            'version': '1.0',
            'input_data': [{'name': 'INPUT_FILE', 'file_id': source_file.id}],
        }
        recipe = recipe_test_utils.create_recipe(recipe_type=recipe_edited, input=input_data)
        job = job_test_utils.create_job(job_type=job_type, status='COMPLETED',
                                        started=day_start, ended=day_start)
        job.recipe_id = recipe.id
        job.save()

    started = '2020-01-01T00:00:00Z'
    ended = '2020-02-01T00:00:00Z'
    url = '/%s/timeline/job-types/?started=%s&ended=%s&name=%s&version=%s' % (
        self.api, started, ended, job_type.name, job_type.version)
    response = self.client.generic('GET', url)
    self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)

    result = json.loads(response.content)
    results = result['results']
    self.assertEqual(len(results), 1)
    self.assertEqual(results[0]['name'], job_type.name)
    self.assertEqual(results[0]['title'], job_type.get_title())
    self.assertEqual(results[0]['version'], job_type.version)
    self.assertEqual(results[0]['revision_num'], job_type.revision_num)
def test_execute(self):
    """Tests calling UpdateRecipes.execute() successfully"""

    def two_job_recipe(first_name, first_job, second_name, second_job):
        """Creates a recipe type/recipe where *second_job* depends on *first_job*"""
        definition = {
            'version': '1.0',
            'input_data': [],
            'jobs': [{
                'name': first_name,
                'job_type': {'name': first_job.job_type.name, 'version': first_job.job_type.version},
            }, {
                'name': second_name,
                'job_type': {'name': second_job.job_type.name, 'version': second_job.job_type.version},
                'dependencies': [{'name': first_name}],
            }],
        }
        recipe_type = recipe_test_utils.create_recipe_type(definition=definition)
        recipe = recipe_test_utils.create_recipe(recipe_type=recipe_type)
        recipe_test_utils.create_recipe_job(recipe=recipe, job_name=first_name, job=first_job)
        recipe_test_utils.create_recipe_job(recipe=recipe, job_name=second_name, job=second_job)
        return recipe_type, recipe

    # Recipe 1: a pending job waiting on a failed job
    self.job_1_failed = job_test_utils.create_job(status='FAILED')
    self.job_1_pending = job_test_utils.create_job(status='PENDING')
    self.recipe_type_1, self.recipe_1 = two_job_recipe('job_failed', self.job_1_failed,
                                                       'job_pending', self.job_1_pending)

    # Recipe 2: a blocked job waiting on a running job
    self.job_2_running = job_test_utils.create_job(status='RUNNING')
    self.job_2_blocked = job_test_utils.create_job(status='BLOCKED')
    self.recipe_type_2, self.recipe_2 = two_job_recipe('job_running', self.job_2_running,
                                                       'job_blocked', self.job_2_blocked)

    # Add recipes to message
    message = UpdateRecipes()
    for recipe in (self.recipe_1, self.recipe_2):
        if message.can_fit_more():
            message.add_recipe(recipe.id)

    # Execute message
    result = message.execute()
    self.assertTrue(result)
    self.assertEqual(len(message.new_messages), 2)

    # Check message types: one blocked_jobs and one pending_jobs message expected
    blocked_jobs_msg = False
    pending_jobs_msg = False
    for new_msg in message.new_messages:
        if new_msg.type == 'blocked_jobs':
            blocked_jobs_msg = True
        elif new_msg.type == 'pending_jobs':
            pending_jobs_msg = True
    self.assertTrue(blocked_jobs_msg)
    self.assertTrue(pending_jobs_msg)
def test_execute_with_top_level_recipe(self):
    """Tests calling UpdateRecipeMetrics.execute() successfully where messages need to be sent to update
    a top-level recipe
    """
    batch = batch_test_utils.create_batch()
    top_recipe = recipe_test_utils.create_recipe(batch=batch)
    recipe = recipe_test_utils.create_recipe(batch=batch)
    recipe.recipe = top_recipe
    recipe.root_recipe = top_recipe
    recipe.save()
    top_level_node = recipe_test_utils.create_recipe_node(recipe=top_recipe, sub_recipe=recipe)

    forced_nodes = ForcedNodes()
    forced_nodes.set_all_nodes()
    forced_nodes_dict = convert_forced_nodes_to_v6(forced_nodes).get_dict()

    # Recipe jobs, one per status combination the metrics must count
    job_statuses = ('FAILED', 'CANCELED', 'BLOCKED', 'BLOCKED', 'COMPLETED')
    jobs = [job_test_utils.create_job(status=job_status, save=False) for job_status in job_statuses]
    Job.objects.bulk_create(jobs)

    # Recipe nodes: top-level node plus one node per job in the sub-recipe
    nodes = [top_level_node]
    nodes.extend(recipe_test_utils.create_recipe_node(recipe=recipe, job=job) for job in jobs)
    RecipeNode.objects.bulk_create(nodes)

    # Add recipes to message
    message = UpdateRecipeMetrics()
    if message.can_fit_more():
        message.add_recipe(recipe.id)

    def check_results(msg):
        """Asserts the recipe's job metrics and the follow-on messages after executing *msg*"""
        updated = Recipe.objects.get(id=recipe.id)
        self.assertEqual(updated.jobs_total, 5)
        self.assertEqual(updated.jobs_pending, 0)
        self.assertEqual(updated.jobs_blocked, 2)
        self.assertEqual(updated.jobs_queued, 0)
        self.assertEqual(updated.jobs_running, 0)
        self.assertEqual(updated.jobs_failed, 1)
        self.assertEqual(updated.jobs_completed, 1)
        self.assertEqual(updated.jobs_canceled, 1)
        self.assertEqual(updated.sub_recipes_total, 0)
        self.assertEqual(updated.sub_recipes_completed, 0)

        # Make sure message is created to update top-level recipe and recipe metrics
        # There should be no message to update batch metrics since we did not update a top-level recipe
        self.assertEqual(len(msg.new_messages), 2)
        update_recipe_metrics_msg = msg.new_messages[0]
        update_recipe_msg = msg.new_messages[1]
        self.assertEqual(update_recipe_metrics_msg.type, 'update_recipe_metrics')
        self.assertListEqual(update_recipe_metrics_msg._recipe_ids, [top_recipe.id])
        self.assertEqual(update_recipe_msg.type, 'update_recipe')
        self.assertEqual(update_recipe_msg.root_recipe_id, top_recipe.id)
        self.assertDictEqual(convert_forced_nodes_to_v6(update_recipe_msg.forced_nodes).get_dict(),
                             forced_nodes_dict)

    # Execute message
    result = message.execute()
    self.assertTrue(result)
    check_results(message)

    # Test executing message again
    message_json_dict = message.to_json()
    message = UpdateRecipeMetrics.from_json(message_json_dict)
    result = message.execute()
    self.assertTrue(result)
    check_results(message)
def test_calculate_status(self):
    """Tests calculating job load filtering by status."""
    job_type = job_test_utils.create_job_type()
    # One job in every status; the final counts show only PENDING, QUEUED, and RUNNING contribute
    for job_status in ('PENDING', 'BLOCKED', 'QUEUED', 'RUNNING', 'COMPLETED', 'FAILED', 'CANCELED'):
        job_test_utils.create_job(job_type=job_type, status=job_status)

    JobLoad.objects.calculate()

    results = JobLoad.objects.all()
    self.assertEqual(len(results), 1)
    self.assertEqual(results[0].job_type_id, job_type.id)
    self.assertIsNotNone(results[0].measured)
    self.assertEqual(results[0].pending_count, 1)
    self.assertEqual(results[0].queued_count, 1)
    self.assertEqual(results[0].running_count, 1)
    self.assertEqual(results[0].total_count, 3)
def test_get_existing_jobs_to_queue(self):
    """Tests calling RecipeHandler.get_existing_jobs_to_queue()"""
    input_name_1 = 'Test Input 1'
    output_name_1 = 'Test Output 1'
    interface_1 = {
        'version': '1.0',
        'command': 'my_cmd',
        'command_arguments': 'args',
        'input_data': [{'name': input_name_1, 'type': 'file', 'media_types': ['text/plain']}],
        'output_data': [{'name': output_name_1, 'type': 'files', 'media_type': 'image/png'}],
    }
    job_type_1 = job_test_utils.create_job_type(interface=interface_1)
    job_1 = job_test_utils.create_job(job_type=job_type_1)

    input_name_2 = 'Test Input 2'
    output_name_2 = 'Test Output 2'
    interface_2 = {
        'version': '1.0',
        'command': 'my_cmd',
        'command_arguments': 'args',
        'input_data': [{'name': input_name_2, 'type': 'files', 'media_types': ['image/png', 'image/tiff']}],
        'output_data': [{'name': output_name_2, 'type': 'file'}],
    }
    job_type_2 = job_test_utils.create_job_type(interface=interface_2)
    job_2 = job_test_utils.create_job(job_type=job_type_2)

    workspace = storage_test_utils.create_workspace()
    file_1 = storage_test_utils.create_file(workspace=workspace, media_type='text/plain')

    # Job 1 reads the recipe input; Job 2 consumes Job 1's output and so cannot queue yet
    definition = {
        'version': '1.0',
        'input_data': [{'name': 'Recipe Input', 'type': 'file', 'media_types': ['text/plain']}],
        'jobs': [{
            'name': 'Job 1',
            'job_type': {'name': job_type_1.name, 'version': job_type_1.version},
            'recipe_inputs': [{'recipe_input': 'Recipe Input', 'job_input': input_name_1}],
        }, {
            'name': 'Job 2',
            'job_type': {'name': job_type_2.name, 'version': job_type_2.version},
            'dependencies': [{'name': 'Job 1',
                              'connections': [{'output': output_name_1, 'input': input_name_2}]}],
        }],
    }
    data = {
        'version': '1.0',
        'input_data': [{'name': 'Recipe Input', 'file_id': file_1.id}],
        'workspace_id': workspace.id,
    }
    recipe_type = recipe_test_utils.create_recipe_type(definition=definition)
    recipe = recipe_test_utils.create_recipe(recipe_type=recipe_type, data=data)
    recipe_test_utils.create_recipe_job(recipe=recipe, job_name='Job 1', job=job_1)
    recipe_test_utils.create_recipe_job(recipe=recipe, job_name='Job 2', job=job_2)
    recipe_jobs = list(RecipeJob.objects.filter(recipe_id=recipe.id))
    handler = RecipeHandler(recipe, recipe_jobs)

    jobs_to_queue = handler.get_existing_jobs_to_queue()

    # Make sure only Job 1 is returned and that its job data is correct
    self.assertEqual(len(jobs_to_queue), 1)
    self.assertEqual(jobs_to_queue[0][0].id, job_1.id)
    self.assertDictEqual(jobs_to_queue[0][1].get_dict(), {
        'version': '1.0',
        'input_data': [{'name': input_name_1, 'file_id': file_1.id}],
        'output_data': [{'name': output_name_1, 'workspace_id': workspace.id}],
    })
def setUp(self):
    """Builds one recipe whose jobs cover each dependency/status combination used by the handler tests.

    Job names encode the statuses of their dependencies, e.g. job_fa_co_a depends on the FAILED
    and COMPLETED jobs; the *_b jobs each depend on the corresponding *_a job.
    """
    django.setup()

    self.job_failed = job_test_utils.create_job(status='FAILED')
    self.job_completed = job_test_utils.create_job(status='COMPLETED')
    self.job_running = job_test_utils.create_job(status='RUNNING')
    self.job_queued = job_test_utils.create_job(status='QUEUED')
    self.job_canceled = job_test_utils.create_job(status='CANCELED')
    self.job_fa_co_a = job_test_utils.create_job(status='BLOCKED')
    self.job_fa_co_b = job_test_utils.create_job(status='PENDING')
    self.job_co_ru_qu_a = job_test_utils.create_job(status='BLOCKED')
    self.job_co_ru_qu_b = job_test_utils.create_job(status='BLOCKED')
    self.job_qu_ca_a = job_test_utils.create_job(status='PENDING')
    self.job_qu_ca_b = job_test_utils.create_job(status='PENDING')

    def job_entry(name, job, *dependencies):
        """Builds one entry of the recipe definition's 'jobs' list"""
        entry = {
            'name': name,
            'job_type': {'name': job.job_type.name, 'version': job.job_type.version},
        }
        if dependencies:
            entry['dependencies'] = [{'name': dep} for dep in dependencies]
        return entry

    self.definition = {
        'version': '1.0',
        'input_data': [],
        'jobs': [
            job_entry('job_failed', self.job_failed),
            job_entry('job_completed', self.job_completed),
            job_entry('job_running', self.job_running),
            job_entry('job_queued', self.job_queued),
            job_entry('job_canceled', self.job_canceled),
            job_entry('job_fa_co_a', self.job_fa_co_a, 'job_failed', 'job_completed'),
            job_entry('job_fa_co_b', self.job_fa_co_b, 'job_fa_co_a'),
            job_entry('job_co_ru_qu_a', self.job_co_ru_qu_a, 'job_completed', 'job_running', 'job_queued'),
            job_entry('job_co_ru_qu_b', self.job_co_ru_qu_b, 'job_co_ru_qu_a'),
            job_entry('job_qu_ca_a', self.job_qu_ca_a, 'job_queued', 'job_canceled'),
            job_entry('job_qu_ca_b', self.job_qu_ca_b, 'job_qu_ca_a'),
        ],
    }

    self.recipe_type = recipe_test_utils.create_recipe_type(definition=self.definition)
    self.recipe = recipe_test_utils.create_recipe(recipe_type=self.recipe_type)
    for job_name, job in [('job_failed', self.job_failed),
                          ('job_completed', self.job_completed),
                          ('job_running', self.job_running),
                          ('job_queued', self.job_queued),
                          ('job_canceled', self.job_canceled),
                          ('job_fa_co_a', self.job_fa_co_a),
                          ('job_fa_co_b', self.job_fa_co_b),
                          ('job_co_ru_qu_a', self.job_co_ru_qu_a),
                          ('job_co_ru_qu_b', self.job_co_ru_qu_b),
                          ('job_qu_ca_a', self.job_qu_ca_a),
                          ('job_qu_ca_b', self.job_qu_ca_b)]:
        recipe_test_utils.create_recipe_job(recipe=self.recipe, job_name=job_name, job=job)
    self.recipe_jobs = list(RecipeJob.objects.filter(recipe_id=self.recipe.id))
def test_update_job_exe_dup(self):
    """Tests running the database update to remove job execution duplicates"""
    # Create jobs with duplicate job executions
    job_type = job_test_utils.create_job_type()
    job_1 = job_test_utils.create_job(job_type=job_type, num_exes=2)
    job_2 = job_test_utils.create_job(job_type=job_type, num_exes=3)
    job_3 = job_test_utils.create_job(job_type=job_type, num_exes=2)
    # Job 1: both exe numbers duplicated
    job_exe_1 = job_test_utils.create_job_exe(job=job_1, status='COMPLETED', exe_num=1)
    job_exe_2 = job_test_utils.create_job_exe(job=job_1, status='COMPLETED', exe_num=1)
    job_exe_3 = job_test_utils.create_job_exe(job=job_1, status='COMPLETED', exe_num=2)
    job_exe_4 = job_test_utils.create_job_exe(job=job_1, status='COMPLETED', exe_num=2)
    # Job 2: only exe number 2 duplicated
    job_exe_5 = job_test_utils.create_job_exe(job=job_2, status='COMPLETED', exe_num=1)
    job_exe_6 = job_test_utils.create_job_exe(job=job_2, status='COMPLETED', exe_num=2)
    job_exe_7 = job_test_utils.create_job_exe(job=job_2, status='COMPLETED', exe_num=2)
    job_exe_8 = job_test_utils.create_job_exe(job=job_2, status='COMPLETED', exe_num=3)
    # Job 3: no duplicates
    job_exe_9 = job_test_utils.create_job_exe(job=job_3, status='COMPLETED', exe_num=1)

    # Create some task updates to make sure they get deleted as well (two per Job 1 execution)
    task_updates = [TaskUpdate(job_exe=job_exe, task_id='1234', status='foo')
                    for job_exe in (job_exe_1, job_exe_2, job_exe_3, job_exe_4)
                    for _ in range(2)]
    TaskUpdate.objects.bulk_create(task_updates)

    # Run update
    updater = DatabaseUpdater()
    updater.update()

    # Per (job, exe_num) pair, only the first-created execution should remain
    expected_job_exe_ids = {job_exe_1.id, job_exe_3.id, job_exe_5.id,
                            job_exe_6.id, job_exe_8.id, job_exe_9.id}
    actual_job_exe_ids = {job_exe.id for job_exe in JobExecution.objects.all().only('id')}
    self.assertSetEqual(expected_job_exe_ids, actual_job_exe_ids)
def test_update_batch_fields(self):
    """Tests running the database update to populate new batch fields in job and recipe models"""
    # batch_1: an old-style batch with a definition-level priority; its
    # revision/configuration fields are blanked so the updater must fill them
    definition = {"priority": 303}
    batch_1 = batch_test_utils.create_batch_old(definition=definition)
    batch_1.recipe_type_rev_id = 1
    batch_1.configuration = {}
    batch_1.save()
    # Mark the creator job done so the update can flag creation as complete
    batch_1.creator_job.status = 'COMPLETED'
    batch_1.creator_job.save()
    # batch_2: a current-style batch used as a control
    batch_2 = batch_test_utils.create_batch()
    recipe_type = recipe_test_utils.create_recipe_type()
    recipe_1 = recipe_test_utils.create_recipe(recipe_type=recipe_type)
    recipe_2 = recipe_test_utils.create_recipe(recipe_type=recipe_type)
    job_1 = job_test_utils.create_job()
    job_2 = job_test_utils.create_job()
    # Link recipes/jobs to batch_1 through the legacy link tables
    batch_test_utils.create_batch_recipe(batch=batch_1, recipe=recipe_1)
    batch_test_utils.create_batch_recipe(batch=batch_1, recipe=recipe_2)
    batch_test_utils.create_batch_job(batch=batch_1, job=job_1)
    batch_test_utils.create_batch_job(batch=batch_1, job=job_2)
    batch_1.total_count = 2
    batch_1.save()
    recipe_3 = recipe_test_utils.create_recipe()
    recipe_4 = recipe_test_utils.create_recipe()
    job_3 = job_test_utils.create_job()
    job_4 = job_test_utils.create_job()
    batch_test_utils.create_batch_recipe(batch=batch_2, recipe=recipe_3)
    batch_test_utils.create_batch_recipe(batch=batch_2, recipe=recipe_4)
    batch_test_utils.create_batch_job(batch=batch_2, job=job_3)
    batch_test_utils.create_batch_job(batch=batch_2, job=job_4)
    # This batch tests an old batch that never created any recipes
    # Timeline: rev 1 < rev 2 < batch created < rev 3, so the updater should
    # pick revision 2 (the latest revision existing when the batch was made)
    time_rev_1 = now()
    time_rev_2 = time_rev_1 + timedelta(minutes=1)
    time_batch = time_rev_2 + timedelta(minutes=1)
    time_rev_3 = time_batch + timedelta(minutes=1)
    recipe_type_3 = recipe_test_utils.create_recipe_type()
    local_definition = copy.deepcopy(recipe_type_3.definition)
    # Two edits create revisions 2 and 3 of the recipe type
    recipe_test_utils.edit_recipe_type(recipe_type_3, local_definition)
    recipe_test_utils.edit_recipe_type(recipe_type_3, local_definition)
    RecipeTypeRevision.objects.filter(
        recipe_type_id=recipe_type_3.id, revision_num=1).update(created=time_rev_1)
    RecipeTypeRevision.objects.filter(
        recipe_type_id=recipe_type_3.id, revision_num=2).update(created=time_rev_2)
    RecipeTypeRevision.objects.filter(
        recipe_type_id=recipe_type_3.id, revision_num=3).update(created=time_rev_3)
    batch_3 = batch_test_utils.create_batch_old(recipe_type=recipe_type_3)
    batch_3.recipe_type_rev_id = 1
    batch_3.created = time_batch
    batch_3.save()
    # Run update
    updater = DatabaseUpdater()
    updater.update()
    # Check results
    batch_1 = Batch.objects.get(id=batch_1.id)
    self.assertTrue(batch_1.is_creation_done)
    self.assertEqual(batch_1.recipes_estimated, 2)
    recipe_type_rev = RecipeTypeRevision.objects.get_revision(
        recipe_type.name, recipe_type.revision_num)
    self.assertEqual(batch_1.recipe_type_rev_id, recipe_type_rev.id)
    self.assertEqual(batch_1.root_batch_id, batch_1.id)
    # Priority should be copied from the old definition into configuration
    self.assertEqual(batch_1.get_configuration().priority, 303)
    # Every linked job/recipe should now carry its batch id directly
    job_1 = Job.objects.get(id=job_1.id)
    self.assertEqual(job_1.batch_id, batch_1.id)
    job_2 = Job.objects.get(id=job_2.id)
    self.assertEqual(job_2.batch_id, batch_1.id)
    job_3 = Job.objects.get(id=job_3.id)
    self.assertEqual(job_3.batch_id, batch_2.id)
    job_4 = Job.objects.get(id=job_4.id)
    self.assertEqual(job_4.batch_id, batch_2.id)
    recipe_1 = Recipe.objects.get(id=recipe_1.id)
    self.assertEqual(recipe_1.batch_id, batch_1.id)
    recipe_2 = Recipe.objects.get(id=recipe_2.id)
    self.assertEqual(recipe_2.batch_id, batch_1.id)
    recipe_3 = Recipe.objects.get(id=recipe_3.id)
    self.assertEqual(recipe_3.batch_id, batch_2.id)
    recipe_4 = Recipe.objects.get(id=recipe_4.id)
    self.assertEqual(recipe_4.batch_id, batch_2.id)
    # batch_3 should have been matched to revision 2 per the timeline above
    batch_3 = Batch.objects.get(id=batch_3.id)
    recipe_type_rev = RecipeTypeRevision.objects.get_revision(
        recipe_type_3.name, 2)
    self.assertEqual(batch_3.recipe_type_rev_id, recipe_type_rev.id)
    self.assertEqual(batch_3.root_batch_id, batch_3.id)
def test_execute(self):
    """Tests calling UpdateRecipeMetrics.execute() successfully"""
    # Recipe 1: five jobs — 1 FAILED, 1 CANCELED, 2 BLOCKED, 1 COMPLETED
    recipe_1 = recipe_test_utils.create_recipe()
    job_1 = job_test_utils.create_job(status='FAILED')
    job_2 = job_test_utils.create_job(status='CANCELED')
    job_3 = job_test_utils.create_job(status='BLOCKED')
    job_4 = job_test_utils.create_job(status='BLOCKED')
    job_5 = job_test_utils.create_job(status='COMPLETED')
    # NOTE(review): assumes create_recipe_node returns unsaved nodes that
    # bulk_create then persists (the sub-recipe test uses save=False for the
    # same pattern) — confirm against the recipe test utils
    recipe_node_1 = recipe_test_utils.create_recipe_node(recipe=recipe_1, job=job_1)
    recipe_node_2 = recipe_test_utils.create_recipe_node(recipe=recipe_1, job=job_2)
    recipe_node_3 = recipe_test_utils.create_recipe_node(recipe=recipe_1, job=job_3)
    recipe_node_4 = recipe_test_utils.create_recipe_node(recipe=recipe_1, job=job_4)
    recipe_node_5 = recipe_test_utils.create_recipe_node(recipe=recipe_1, job=job_5)
    RecipeNode.objects.bulk_create([
        recipe_node_1, recipe_node_2, recipe_node_3, recipe_node_4, recipe_node_5
    ])
    # Recipe 2 belongs to a batch, so executing the message should also spawn
    # an update_batch_metrics message for that batch
    batch = batch_test_utils.create_batch()
    recipe_2 = recipe_test_utils.create_recipe(batch=batch)
    # Recipe 2: nine jobs — 2 COMPLETED, 1 RUNNING, 1 QUEUED, 3 PENDING,
    # 1 CANCELED, 1 BLOCKED
    job_6 = job_test_utils.create_job(status='COMPLETED')
    job_7 = job_test_utils.create_job(status='COMPLETED')
    job_8 = job_test_utils.create_job(status='RUNNING')
    job_9 = job_test_utils.create_job(status='QUEUED')
    job_10 = job_test_utils.create_job(status='PENDING')
    job_11 = job_test_utils.create_job(status='PENDING')
    job_12 = job_test_utils.create_job(status='PENDING')
    job_13 = job_test_utils.create_job(status='CANCELED')
    job_14 = job_test_utils.create_job(status='BLOCKED')
    recipe_node_6 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_6)
    recipe_node_7 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_7)
    recipe_node_8 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_8)
    recipe_node_9 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_9)
    recipe_node_10 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_10)
    recipe_node_11 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_11)
    recipe_node_12 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_12)
    recipe_node_13 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_13)
    recipe_node_14 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_14)
    RecipeNode.objects.bulk_create([
        recipe_node_6, recipe_node_7, recipe_node_8, recipe_node_9, recipe_node_10,
        recipe_node_11, recipe_node_12, recipe_node_13, recipe_node_14
    ])
    # Add recipes to message
    message = UpdateRecipeMetrics()
    if message.can_fit_more():
        message.add_recipe(recipe_1.id)
    if message.can_fit_more():
        message.add_recipe(recipe_2.id)
    # Execute message
    result = message.execute()
    self.assertTrue(result)
    # Recipe 1 counts should match the five jobs created above
    recipe_1 = Recipe.objects.get(id=recipe_1.id)
    self.assertEqual(recipe_1.jobs_total, 5)
    self.assertEqual(recipe_1.jobs_pending, 0)
    self.assertEqual(recipe_1.jobs_blocked, 2)
    self.assertEqual(recipe_1.jobs_queued, 0)
    self.assertEqual(recipe_1.jobs_running, 0)
    self.assertEqual(recipe_1.jobs_failed, 1)
    self.assertEqual(recipe_1.jobs_completed, 1)
    self.assertEqual(recipe_1.jobs_canceled, 1)
    # Recipe 2 counts should match its nine jobs
    recipe_2 = Recipe.objects.get(id=recipe_2.id)
    self.assertEqual(recipe_2.jobs_total, 9)
    self.assertEqual(recipe_2.jobs_pending, 3)
    self.assertEqual(recipe_2.jobs_blocked, 1)
    self.assertEqual(recipe_2.jobs_queued, 1)
    self.assertEqual(recipe_2.jobs_running, 1)
    self.assertEqual(recipe_2.jobs_failed, 0)
    self.assertEqual(recipe_2.jobs_completed, 2)
    self.assertEqual(recipe_2.jobs_canceled, 1)
    # Make sure message is created to update batch metrics
    self.assertEqual(len(message.new_messages), 1)
    msg = message.new_messages[0]
    self.assertEqual(msg.type, 'update_batch_metrics')
    self.assertListEqual(msg._batch_ids, [batch.id])
    # Test executing message again — the message must be idempotent after a
    # JSON round-trip, producing the same metrics and follow-on messages
    message_json_dict = message.to_json()
    message = UpdateRecipeMetrics.from_json(message_json_dict)
    result = message.execute()
    self.assertTrue(result)
    recipe_1 = Recipe.objects.get(id=recipe_1.id)
    self.assertEqual(recipe_1.jobs_total, 5)
    self.assertEqual(recipe_1.jobs_pending, 0)
    self.assertEqual(recipe_1.jobs_blocked, 2)
    self.assertEqual(recipe_1.jobs_queued, 0)
    self.assertEqual(recipe_1.jobs_running, 0)
    self.assertEqual(recipe_1.jobs_failed, 1)
    self.assertEqual(recipe_1.jobs_completed, 1)
    self.assertEqual(recipe_1.jobs_canceled, 1)
    recipe_2 = Recipe.objects.get(id=recipe_2.id)
    self.assertEqual(recipe_2.jobs_total, 9)
    self.assertEqual(recipe_2.jobs_pending, 3)
    self.assertEqual(recipe_2.jobs_blocked, 1)
    self.assertEqual(recipe_2.jobs_queued, 1)
    self.assertEqual(recipe_2.jobs_running, 1)
    self.assertEqual(recipe_2.jobs_failed, 0)
    self.assertEqual(recipe_2.jobs_completed, 2)
    self.assertEqual(recipe_2.jobs_canceled, 1)
    # Make sure message is created to update batch metrics
    self.assertEqual(len(message.new_messages), 1)
    msg = message.new_messages[0]
    self.assertEqual(msg.type, 'update_batch_metrics')
    self.assertListEqual(msg._batch_ids, [batch.id])
def test_completed_failed(self):
    """Tests the metrics plot view completed and failed"""
    from django.utils.timezone import utc
    # Four FAILED jobs ending hourly at 10:00-13:00 on 2015-01-01
    job1 = job_test_utils.create_job(status='FAILED',
                                     ended=datetime.datetime(2015, 1, 1, 10, tzinfo=utc))
    job_test_utils.create_job_exe(job=job1, status='FAILED', ended=job1.ended)
    job2 = job_test_utils.create_job(status='FAILED',
                                     ended=datetime.datetime(2015, 1, 1, 11, tzinfo=utc))
    job_test_utils.create_job_exe(job=job2, status='FAILED', ended=job2.ended)
    job3 = job_test_utils.create_job(status='FAILED',
                                     ended=datetime.datetime(2015, 1, 1, 12, tzinfo=utc))
    job_test_utils.create_job_exe(job=job3, status='FAILED', ended=job3.ended)
    job4 = job_test_utils.create_job(status='FAILED',
                                     ended=datetime.datetime(2015, 1, 1, 13, tzinfo=utc))
    job_test_utils.create_job_exe(job=job4, status='FAILED', ended=job4.ended)
    # Five COMPLETED jobs ending hourly at 10:00-14:00
    job5 = job_test_utils.create_job(status='COMPLETED',
                                     ended=datetime.datetime(2015, 1, 1, 10, tzinfo=utc))
    job_test_utils.create_job_exe(job=job5, status=job5.status, ended=job5.ended)
    job6 = job_test_utils.create_job(status='COMPLETED',
                                     ended=datetime.datetime(2015, 1, 1, 11, tzinfo=utc))
    job_test_utils.create_job_exe(job=job6, status=job6.status, ended=job6.ended)
    job7 = job_test_utils.create_job(status='COMPLETED',
                                     ended=datetime.datetime(2015, 1, 1, 12, tzinfo=utc))
    job_test_utils.create_job_exe(job=job7, status=job7.status, ended=job7.ended)
    job8 = job_test_utils.create_job(status='COMPLETED',
                                     ended=datetime.datetime(2015, 1, 1, 13, tzinfo=utc))
    job_test_utils.create_job_exe(job=job8, status=job8.status, ended=job8.ended)
    job9 = job_test_utils.create_job(status='COMPLETED',
                                     ended=datetime.datetime(2015, 1, 1, 14, tzinfo=utc))
    job_test_utils.create_job_exe(job=job9, status=job9.status, ended=job9.ended)
    # Roll the executions up into metrics entries for the day
    from metrics.models import MetricsJobType
    MetricsJobType.objects.calculate(
        datetime.datetime(2015, 1, 1, tzinfo=utc))
    # Query plot data for both columns restricted to the 11:00-13:00 window
    url = '/v6/metrics/job-types/plot-data/?column=completed_count&column=failed_count&dataType=job-types&started=2015-01-01T11:00:00Z&ended=2015-01-01T13:00:00Z'
    response = self.client.generic('GET', url)
    self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
    result = json.loads(response.content)
    # One result series per requested column
    self.assertEqual(len(result['results']), 2)
    self.assertEqual(result['results'][0]['min_x'], unicode('2015-01-01T11:00:00Z'))
    self.assertEqual(result['results'][0]['max_x'], unicode('2015-01-01T13:00:00Z'))
    # Three hourly points fall inside the window, one job ended in each
    self.assertEqual(len(result['results'][0]['values']), 3)
    for value in result['results'][0]['values']:
        self.assertEqual(value['value'], 1)
    self.assertEqual(len(result['results'][1]['values']), 3)
    self.assertEqual(result['results'][1]['min_x'], unicode('2015-01-01T11:00:00Z'))
    self.assertEqual(result['results'][1]['max_x'], unicode('2015-01-01T13:00:00Z'))
    for value in result['results'][1]['values']:
        self.assertEqual(value['value'], 1)
def test_execute_with_sub_recipes(self):
    """Tests calling UpdateRecipeMetrics.execute() successfully with sub-recipes"""
    recipe_1 = recipe_test_utils.create_recipe()
    batch = batch_test_utils.create_batch()
    recipe_2 = recipe_test_utils.create_recipe(batch=batch)
    # Recipe 1 jobs
    job_1 = job_test_utils.create_job(status='FAILED', save=False)
    job_2 = job_test_utils.create_job(status='CANCELED', save=False)
    job_3 = job_test_utils.create_job(status='BLOCKED', save=False)
    job_4 = job_test_utils.create_job(status='BLOCKED', save=False)
    job_5 = job_test_utils.create_job(status='COMPLETED', save=False)
    # Recipe 2 jobs
    job_6 = job_test_utils.create_job(status='COMPLETED', save=False)
    job_7 = job_test_utils.create_job(status='COMPLETED', save=False)
    job_8 = job_test_utils.create_job(status='RUNNING', save=False)
    job_9 = job_test_utils.create_job(status='QUEUED', save=False)
    job_10 = job_test_utils.create_job(status='PENDING', save=False)
    job_11 = job_test_utils.create_job(status='PENDING', save=False)
    job_12 = job_test_utils.create_job(status='PENDING', save=False)
    job_13 = job_test_utils.create_job(status='CANCELED', save=False)
    job_14 = job_test_utils.create_job(status='BLOCKED', save=False)
    # Jobs are created unsaved (save=False) and persisted in one bulk insert
    Job.objects.bulk_create([
        job_1, job_2, job_3, job_4, job_5, job_6, job_7, job_8, job_9,
        job_10, job_11, job_12, job_13, job_14
    ])
    # Recipe 1 sub-recipes: one in-progress with a mix of job states, one done
    sub_recipe_1 = recipe_test_utils.create_recipe(save=False)
    sub_recipe_1.jobs_total = 26
    sub_recipe_1.jobs_pending = 3
    sub_recipe_1.jobs_blocked = 4
    sub_recipe_1.jobs_queued = 5
    sub_recipe_1.jobs_running = 1
    sub_recipe_1.jobs_failed = 2
    sub_recipe_1.jobs_completed = 3
    sub_recipe_1.jobs_canceled = 8
    sub_recipe_1.is_completed = False
    sub_recipe_2 = recipe_test_utils.create_recipe(save=False)
    sub_recipe_2.jobs_total = 30
    sub_recipe_2.jobs_completed = 30
    sub_recipe_2.is_completed = True
    # Recipe 2 sub-recipes: one in-progress, two done
    sub_recipe_3 = recipe_test_utils.create_recipe(save=False)
    sub_recipe_3.jobs_total = 21
    sub_recipe_3.jobs_pending = 2
    sub_recipe_3.jobs_blocked = 5
    sub_recipe_3.jobs_queued = 0
    sub_recipe_3.jobs_running = 3
    sub_recipe_3.jobs_failed = 2
    sub_recipe_3.jobs_completed = 8
    sub_recipe_3.jobs_canceled = 1
    sub_recipe_3.is_completed = False
    sub_recipe_4 = recipe_test_utils.create_recipe(save=False)
    sub_recipe_4.jobs_total = 7
    sub_recipe_4.jobs_completed = 7
    sub_recipe_4.is_completed = True
    sub_recipe_5 = recipe_test_utils.create_recipe(save=False)
    sub_recipe_5.jobs_total = 12
    sub_recipe_5.jobs_completed = 12
    sub_recipe_5.is_completed = True
    Recipe.objects.bulk_create([
        sub_recipe_1, sub_recipe_2, sub_recipe_3, sub_recipe_4, sub_recipe_5
    ])
    # Recipe 1 nodes
    recipe_node_1 = recipe_test_utils.create_recipe_node(recipe=recipe_1, job=job_1)
    recipe_node_2 = recipe_test_utils.create_recipe_node(recipe=recipe_1, job=job_2)
    recipe_node_3 = recipe_test_utils.create_recipe_node(recipe=recipe_1, job=job_3)
    recipe_node_4 = recipe_test_utils.create_recipe_node(recipe=recipe_1, job=job_4)
    recipe_node_5 = recipe_test_utils.create_recipe_node(recipe=recipe_1, job=job_5)
    recipe_node_6 = recipe_test_utils.create_recipe_node(
        recipe=recipe_1, sub_recipe=sub_recipe_1)
    recipe_node_7 = recipe_test_utils.create_recipe_node(
        recipe=recipe_1, sub_recipe=sub_recipe_2)
    # Recipe 2 nodes
    recipe_node_8 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_6)
    recipe_node_9 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_7)
    recipe_node_10 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_8)
    recipe_node_11 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_9)
    recipe_node_12 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_10)
    recipe_node_13 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_11)
    recipe_node_14 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_12)
    recipe_node_15 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_13)
    recipe_node_16 = recipe_test_utils.create_recipe_node(recipe=recipe_2, job=job_14)
    recipe_node_17 = recipe_test_utils.create_recipe_node(
        recipe=recipe_2, sub_recipe=sub_recipe_3)
    recipe_node_18 = recipe_test_utils.create_recipe_node(
        recipe=recipe_2, sub_recipe=sub_recipe_4)
    recipe_node_19 = recipe_test_utils.create_recipe_node(
        recipe=recipe_2, sub_recipe=sub_recipe_5)
    RecipeNode.objects.bulk_create([
        recipe_node_1, recipe_node_2, recipe_node_3, recipe_node_4, recipe_node_5,
        recipe_node_6, recipe_node_7, recipe_node_8, recipe_node_9, recipe_node_10,
        recipe_node_11, recipe_node_12, recipe_node_13, recipe_node_14,
        recipe_node_15, recipe_node_16, recipe_node_17, recipe_node_18, recipe_node_19
    ])
    # Add recipes to message
    message = UpdateRecipeMetrics()
    if message.can_fit_more():
        message.add_recipe(recipe_1.id)
    if message.can_fit_more():
        message.add_recipe(recipe_2.id)
    # Execute message
    result = message.execute()
    self.assertTrue(result)
    # Recipe 1 metrics: own 5 jobs plus sub_recipe_1 (26) plus sub_recipe_2 (30)
    recipe_1 = Recipe.objects.get(id=recipe_1.id)
    self.assertEqual(recipe_1.jobs_total, 61)
    self.assertEqual(recipe_1.jobs_pending, 3)
    self.assertEqual(recipe_1.jobs_blocked, 6)
    self.assertEqual(recipe_1.jobs_queued, 5)
    self.assertEqual(recipe_1.jobs_running, 1)
    self.assertEqual(recipe_1.jobs_failed, 3)
    self.assertEqual(recipe_1.jobs_completed, 34)
    self.assertEqual(recipe_1.jobs_canceled, 9)
    self.assertEqual(recipe_1.sub_recipes_total, 2)
    self.assertEqual(recipe_1.sub_recipes_completed, 1)
    # Recipe 2 metrics: own 9 jobs plus sub-recipes 3 (21), 4 (7) and 5 (12)
    recipe_2 = Recipe.objects.get(id=recipe_2.id)
    self.assertEqual(recipe_2.jobs_total, 49)
    self.assertEqual(recipe_2.jobs_pending, 5)
    self.assertEqual(recipe_2.jobs_blocked, 6)
    self.assertEqual(recipe_2.jobs_queued, 1)
    self.assertEqual(recipe_2.jobs_running, 4)
    self.assertEqual(recipe_2.jobs_failed, 2)
    self.assertEqual(recipe_2.jobs_completed, 29)
    self.assertEqual(recipe_2.jobs_canceled, 2)
    self.assertEqual(recipe_2.sub_recipes_total, 3)
    self.assertEqual(recipe_2.sub_recipes_completed, 2)
    # Make sure message is created to update batch metrics
    self.assertEqual(len(message.new_messages), 1)
    msg = message.new_messages[0]
    self.assertEqual(msg.type, 'update_batch_metrics')
    self.assertListEqual(msg._batch_ids, [batch.id])
    # Test executing message again — JSON round-trip must be idempotent
    message_json_dict = message.to_json()
    message = UpdateRecipeMetrics.from_json(message_json_dict)
    result = message.execute()
    self.assertTrue(result)
    recipe_1 = Recipe.objects.get(id=recipe_1.id)
    self.assertEqual(recipe_1.jobs_total, 61)
    self.assertEqual(recipe_1.jobs_pending, 3)
    self.assertEqual(recipe_1.jobs_blocked, 6)
    self.assertEqual(recipe_1.jobs_queued, 5)
    self.assertEqual(recipe_1.jobs_running, 1)
    self.assertEqual(recipe_1.jobs_failed, 3)
    self.assertEqual(recipe_1.jobs_completed, 34)
    self.assertEqual(recipe_1.jobs_canceled, 9)
    self.assertEqual(recipe_1.sub_recipes_total, 2)
    self.assertEqual(recipe_1.sub_recipes_completed, 1)
    recipe_2 = Recipe.objects.get(id=recipe_2.id)
    self.assertEqual(recipe_2.jobs_total, 49)
    self.assertEqual(recipe_2.jobs_pending, 5)
    self.assertEqual(recipe_2.jobs_blocked, 6)
    self.assertEqual(recipe_2.jobs_queued, 1)
    self.assertEqual(recipe_2.jobs_running, 4)
    self.assertEqual(recipe_2.jobs_failed, 2)
    self.assertEqual(recipe_2.jobs_completed, 29)
    self.assertEqual(recipe_2.jobs_canceled, 2)
    self.assertEqual(recipe_2.sub_recipes_total, 3)
    self.assertEqual(recipe_2.sub_recipes_completed, 2)
    # Make sure message is created to update batch metrics
    self.assertEqual(len(message.new_messages), 1)
    msg = message.new_messages[0]
    self.assertEqual(msg.type, 'update_batch_metrics')
    self.assertListEqual(msg._batch_ids, [batch.id])
def test_calculate_stats(self):
    """Tests calculating individual statistics for a metrics entry."""
    job_type = job_test_utils.create_seed_job_type()
    # First COMPLETED job with pre/main/post task timings
    job1 = job_test_utils.create_job(job_type=job_type, status='COMPLETED',
                                     ended=datetime.datetime(2015, 1, 1, tzinfo=utc))
    task_results_dict = {
        'version': '1.0',
        'tasks': [{
            'task_id': '1',
            'type': 'pre',
            'was_launched': True,
            'started': datetime_to_string(
                datetime.datetime(2015, 1, 1, 0, 30, 4, tzinfo=utc)),
            'ended': datetime_to_string(
                datetime.datetime(2015, 1, 1, 1, 6, tzinfo=utc))
        }, {
            'task_id': '2',
            'type': 'main',
            'was_launched': True,
            'started': datetime_to_string(
                datetime.datetime(2015, 1, 1, 1, 40, 8, tzinfo=utc)),
            'ended': datetime_to_string(
                datetime.datetime(2015, 1, 1, 2, 30, 10, tzinfo=utc))
        }, {
            'task_id': '3',
            'type': 'post',
            'was_launched': True,
            'started': datetime_to_string(
                datetime.datetime(2015, 1, 1, 3, 30, 12, tzinfo=utc)),
            'ended': datetime_to_string(
                datetime.datetime(2015, 1, 1, 4, 40, 14, tzinfo=utc))
        }]
    }
    job_test_utils.create_job_exe(
        job=job1, status=job1.status,
        queued=datetime.datetime(2015, 1, 1, tzinfo=utc),
        started=datetime.datetime(2015, 1, 1, 0, 10, 2, tzinfo=utc),
        ended=datetime.datetime(2015, 1, 1, 6, 0, 16, tzinfo=utc),
        task_results=TaskResults(task_results_dict))
    # Second COMPLETED job with longer timings, to exercise min/max/avg
    job2 = job_test_utils.create_job(job_type=job_type, status='COMPLETED',
                                     ended=datetime.datetime(2015, 1, 1, tzinfo=utc))
    task_results_dict = {
        'version': '1.0',
        'tasks': [{
            'task_id': '1',
            'type': 'pre',
            'was_launched': True,
            'started': datetime_to_string(
                datetime.datetime(2015, 1, 1, 4, 30, 4, tzinfo=utc)),
            'ended': datetime_to_string(
                datetime.datetime(2015, 1, 1, 6, 0, 8, tzinfo=utc))
        }, {
            'task_id': '2',
            'type': 'main',
            'was_launched': True,
            'started': datetime_to_string(
                datetime.datetime(2015, 1, 1, 8, 40, 14, tzinfo=utc)),
            'ended': datetime_to_string(
                datetime.datetime(2015, 1, 1, 10, 30, 22, tzinfo=utc))
        }, {
            'task_id': '3',
            'type': 'post',
            'was_launched': True,
            'started': datetime_to_string(
                datetime.datetime(2015, 1, 1, 12, 30, 32, tzinfo=utc)),
            'ended': datetime_to_string(
                datetime.datetime(2015, 1, 1, 14, 40, 44, tzinfo=utc))
        }]
    }
    job_test_utils.create_job_exe(
        job=job2, status=job2.status,
        queued=datetime.datetime(2015, 1, 1, tzinfo=utc),
        started=datetime.datetime(2015, 1, 1, 2, 10, 2, tzinfo=utc),
        ended=datetime.datetime(2015, 1, 1, 16, 0, 58, tzinfo=utc),
        task_results=TaskResults(task_results_dict))
    # One FAILED job per error category so each error counter gets a hit
    sys_error = error_test_utils.create_error(category='SYSTEM')
    job3a = job_test_utils.create_job(job_type=job_type, status='FAILED',
                                      ended=datetime.datetime(2015, 1, 1, tzinfo=utc),
                                      error=sys_error)
    job_test_utils.create_job_exe(job=job3a, status=job3a.status, ended=job3a.ended,
                                  error=sys_error)
    data_error = error_test_utils.create_error(category='DATA')
    job3b = job_test_utils.create_job(job_type=job_type, status='FAILED',
                                      ended=datetime.datetime(2015, 1, 1, tzinfo=utc),
                                      error=data_error)
    job_test_utils.create_job_exe(job=job3b, status=job3b.status, ended=job3b.ended,
                                  error=data_error)
    algo_error = error_test_utils.create_error(category='ALGORITHM')
    job3c = job_test_utils.create_job(job_type=job_type, status='FAILED',
                                      ended=datetime.datetime(2015, 1, 1, tzinfo=utc),
                                      error=algo_error)
    job_test_utils.create_job_exe(job=job3c, status=job3c.status, ended=job3c.ended,
                                  error=algo_error)
    # One CANCELED job for the canceled counter
    job4 = job_test_utils.create_job(job_type=job_type, status='CANCELED',
                                     ended=datetime.datetime(2015, 1, 1, tzinfo=utc))
    job_test_utils.create_job_exe(job=job4, status=job4.status, ended=job4.ended)
    MetricsJobType.objects.calculate(
        datetime.datetime(2015, 1, 1, tzinfo=utc))
    # One metrics entry for the single job type on the single day
    entries = MetricsJobType.objects.filter(
        occurred=datetime.datetime(2015, 1, 1, tzinfo=utc))
    self.assertEqual(len(entries), 1)
    entry = entries.first()
    self.assertEqual(entry.occurred, datetime.datetime(2015, 1, 1, tzinfo=utc))
    self.assertEqual(entry.completed_count, 2)
    self.assertEqual(entry.failed_count, 3)
    self.assertEqual(entry.canceled_count, 1)
    self.assertEqual(entry.total_count, 6)
    self.assertEqual(entry.error_system_count, 1)
    self.assertEqual(entry.error_data_count, 1)
    self.assertEqual(entry.error_algorithm_count, 1)
    # Timing sums/min/max/avg are in seconds, derived from the two completed
    # executions above (e.g. queue times 602s and 7802s -> sum 8404, avg 4202)
    self.assertEqual(entry.queue_time_sum, 8404)
    self.assertEqual(entry.queue_time_min, 602)
    self.assertEqual(entry.queue_time_max, 7802)
    self.assertEqual(entry.queue_time_avg, 4202)
    self.assertEqual(entry.pre_time_sum, 7560)
    self.assertEqual(entry.pre_time_min, 2156)
    self.assertEqual(entry.pre_time_max, 5404)
    self.assertEqual(entry.pre_time_avg, 3780)
    self.assertEqual(entry.job_time_sum, 9610)
    self.assertEqual(entry.job_time_min, 3002)
    self.assertEqual(entry.job_time_max, 6608)
    self.assertEqual(entry.job_time_avg, 4805)
    self.assertEqual(entry.post_time_sum, 12014)
    self.assertEqual(entry.post_time_min, 4202)
    self.assertEqual(entry.post_time_max, 7812)
    self.assertEqual(entry.post_time_avg, 6007)
    self.assertEqual(entry.run_time_sum, 70870)
    self.assertEqual(entry.run_time_min, 21014)
    self.assertEqual(entry.run_time_max, 49856)
    self.assertEqual(entry.run_time_avg, 35435)
    self.assertEqual(entry.stage_time_sum, 41686)
    self.assertEqual(entry.stage_time_min, 11654)
    self.assertEqual(entry.stage_time_max, 30032)
    self.assertEqual(entry.stage_time_avg, 20843)
def setUp(self):
    """Builds standalone jobs plus two recipes covering re-queue scenarios."""
    django.setup()
    self.new_priority = 200
    # Standalone jobs in various terminal states
    self.standalone_failed_job = job_test_utils.create_job(status='FAILED', num_exes=3,
                                                           priority=100)
    self.standalone_superseded_job = job_test_utils.create_job(
        status='FAILED', num_exes=1)
    self.standalone_canceled_job = job_test_utils.create_job(
        status='CANCELED', num_exes=1, priority=100)
    self.standalone_completed_job = job_test_utils.create_job(
        status='COMPLETED')
    Job.objects.supersede_jobs([self.standalone_superseded_job], now())
    # Create recipe for re-queing a job that should now be PENDING (and its dependencies)
    job_type_a_1 = job_test_utils.create_job_type()
    job_type_a_2 = job_test_utils.create_job_type()
    definition_a = {
        'version': '1.0',
        'input_data': [],
        'jobs': [{
            'name': 'Job 1',
            'job_type': {
                'name': job_type_a_1.name,
                'version': job_type_a_1.version,
            }
        }, {
            'name': 'Job 2',
            'job_type': {
                'name': job_type_a_2.name,
                'version': job_type_a_2.version,
            },
            'dependencies': [{
                'name': 'Job 1'
            }],
        }],
    }
    recipe_type_a = recipe_test_utils.create_recipe_type(
        definition=definition_a)
    # Job 1 FAILED -> re-queue candidate; dependent Job 2 is BLOCKED on it
    self.job_a_1 = job_test_utils.create_job(job_type=job_type_a_1, status='FAILED',
                                             num_exes=1)
    self.job_a_2 = job_test_utils.create_job(job_type=job_type_a_2, status='BLOCKED')
    data_a = {
        'version': '1.0',
        'input_data': [],
        'workspace_id': 1,
    }
    recipe_a = recipe_test_utils.create_recipe(recipe_type=recipe_type_a, data=data_a)
    recipe_test_utils.create_recipe_job(recipe=recipe_a, job_name='Job 1',
                                        job=self.job_a_1)
    recipe_test_utils.create_recipe_job(recipe=recipe_a, job_name='Job 2',
                                        job=self.job_a_2)
    # Create recipe for re-queing a job that should now be BLOCKED (and its dependencies)
    job_type_b_1 = job_test_utils.create_job_type()
    job_type_b_2 = job_test_utils.create_job_type()
    job_type_b_3 = job_test_utils.create_job_type()
    definition_b = {
        'version': '1.0',
        'input_data': [],
        'jobs': [{
            'name': 'Job 1',
            'job_type': {
                'name': job_type_b_1.name,
                'version': job_type_b_1.version,
            }
        }, {
            'name': 'Job 2',
            'job_type': {
                'name': job_type_b_2.name,
                'version': job_type_b_2.version,
            },
            'dependencies': [{
                'name': 'Job 1'
            }],
        }, {
            'name': 'Job 3',
            'job_type': {
                'name': job_type_b_3.name,
                'version': job_type_b_3.version,
            },
            'dependencies': [{
                'name': 'Job 2'
            }],
        }],
    }
    recipe_type_b = recipe_test_utils.create_recipe_type(
        definition=definition_b)
    # Chain: Job 1 FAILED <- Job 2 CANCELED <- Job 3 BLOCKED
    self.job_b_1 = job_test_utils.create_job(job_type=job_type_b_1, status='FAILED')
    self.job_b_2 = job_test_utils.create_job(job_type=job_type_b_2, status='CANCELED')
    self.job_b_3 = job_test_utils.create_job(job_type=job_type_b_3, status='BLOCKED')
    data_b = {
        'version': '1.0',
        'input_data': [],
        'workspace_id': 1,
    }
    recipe_b = recipe_test_utils.create_recipe(recipe_type=recipe_type_b, data=data_b)
    recipe_test_utils.create_recipe_job(recipe=recipe_b, job_name='Job 1',
                                        job=self.job_b_1)
    recipe_test_utils.create_recipe_job(recipe=recipe_b, job_name='Job 2',
                                        job=self.job_b_2)
    recipe_test_utils.create_recipe_job(recipe=recipe_b, job_name='Job 3',
                                        job=self.job_b_3)
    # Job IDs to re-queue
    self.job_ids = [
        self.standalone_failed_job.id, self.standalone_canceled_job.id,
        self.standalone_completed_job.id, self.job_a_1.id, self.job_b_2.id
    ]
    # Register a fake processor
    self.mock_processor = MagicMock(QueueEventProcessor)
    Queue.objects.register_processor(lambda: self.mock_processor)
def test_json(self):
    """Tests converting a UpdateRecipes message to and from JSON"""
    # Recipe 1: a FAILED job with a PENDING job depending on it
    self.job_1_failed = job_test_utils.create_job(status='FAILED')
    self.job_1_pending = job_test_utils.create_job(status='PENDING')
    definition_1 = {
        'version': '1.0',
        'input_data': [],
        'jobs': [{
            'name': 'job_failed',
            'job_type': {
                'name': self.job_1_failed.job_type.name,
                'version': self.job_1_failed.job_type.version,
            },
        }, {
            'name': 'job_pending',
            'job_type': {
                'name': self.job_1_pending.job_type.name,
                'version': self.job_1_pending.job_type.version,
            },
            'dependencies': [{
                'name': 'job_failed',
            }],
        }],
    }
    self.recipe_type_1 = recipe_test_utils.create_recipe_type(definition=definition_1)
    self.recipe_1 = recipe_test_utils.create_recipe(recipe_type=self.recipe_type_1)
    recipe_test_utils.create_recipe_job(recipe=self.recipe_1, job_name='job_failed',
                                        job=self.job_1_failed)
    recipe_test_utils.create_recipe_job(recipe=self.recipe_1, job_name='job_pending',
                                        job=self.job_1_pending)
    # Recipe 2: a RUNNING job with a BLOCKED job depending on it
    self.job_2_running = job_test_utils.create_job(status='RUNNING')
    self.job_2_blocked = job_test_utils.create_job(status='BLOCKED')
    definition_2 = {
        'version': '1.0',
        'input_data': [],
        'jobs': [{
            'name': 'job_running',
            'job_type': {
                'name': self.job_2_running.job_type.name,
                'version': self.job_2_running.job_type.version,
            },
        }, {
            'name': 'job_blocked',
            'job_type': {
                'name': self.job_2_blocked.job_type.name,
                'version': self.job_2_blocked.job_type.version,
            },
            'dependencies': [{
                'name': 'job_running',
            }],
        }],
    }
    self.recipe_type_2 = recipe_test_utils.create_recipe_type(definition=definition_2)
    self.recipe_2 = recipe_test_utils.create_recipe(recipe_type=self.recipe_type_2)
    recipe_test_utils.create_recipe_job(recipe=self.recipe_2, job_name='job_running',
                                        job=self.job_2_running)
    recipe_test_utils.create_recipe_job(recipe=self.recipe_2, job_name='job_blocked',
                                        job=self.job_2_blocked)
    # Add recipes to message
    message = UpdateRecipes()
    if message.can_fit_more():
        message.add_recipe(self.recipe_1.id)
    if message.can_fit_more():
        message.add_recipe(self.recipe_2.id)
    # Convert message to JSON and back, and then execute
    message_json_dict = message.to_json()
    new_message = UpdateRecipes.from_json(message_json_dict)
    result = new_message.execute()
    self.assertTrue(result)
    # One follow-on message expected per recipe
    self.assertEqual(len(new_message.new_messages), 2)
def setUp(self):
    """Creates three job types, three errors, and failed jobs at fixed times."""
    django.setup()

    self.job_type_1 = job_test_utils.create_seed_job_type(
        job_version='1.0')
    self.job_type_2 = job_test_utils.create_seed_job_type(
        job_version='2.0')
    self.job_type_3 = job_test_utils.create_seed_job_type(
        job_version='2.0')

    self.error_1 = Error.objects.create(name='Error 1', description='Test',
                                        category='SYSTEM')
    self.error_2 = Error.objects.create(name='Error 2', description='Test',
                                        category='SYSTEM')
    self.error_3 = Error.objects.create(name='Error 3', description='Test',
                                        category='DATA')

    def ts(seconds):
        # UTC-aware datetime from an epoch offset in seconds
        return datetime.datetime.utcfromtimestamp(seconds).replace(tzinfo=timezone.utc)

    # Date stamps for errors
    self.entry_1_last_time = ts(590000)
    self.entry_1_first_time = ts(580000)
    self.entry_2_time = ts(585000)
    self.entry_3_last_time = ts(490000)
    self.entry_3_mid_time = ts(480000)
    self.entry_3_first_time = ts(470000)
    self.entry_4_time = ts(385000)

    # Create jobs: one RUNNING job plus a table-driven set of FAILED jobs
    job_test_utils.create_job(job_type=self.job_type_1, status='RUNNING',
                              last_status_change=timezone.now())
    failures = [
        (self.job_type_1, self.error_1, self.entry_2_time),
        (self.job_type_2, self.error_1, self.entry_4_time),
        (self.job_type_2, self.error_2, self.entry_1_last_time),
        (self.job_type_2, self.error_2, self.entry_1_first_time),
        (self.job_type_3, self.error_2, self.entry_3_mid_time),
        (self.job_type_3, self.error_2, self.entry_3_last_time),
        (self.job_type_3, self.error_2, self.entry_3_first_time),
        (self.job_type_3, self.error_3, timezone.now()),
    ]
    for job_type, error, status_change in failures:
        job_test_utils.create_job(job_type=job_type, error=error, status='FAILED',
                                  last_status_change=status_change)
def test_execute(self):
    """Tests calling UncancelJobs.execute() successfully.

    Only job 2 qualifies for uncancel (CANCELED and never queued). Executing the
    message a second time with a later ``when`` must be a no-op, so the same
    assertions are applied after both executions via shared helpers.
    """
    from recipe.diff.forced_nodes import ForcedNodes
    from recipe.diff.json.forced_nodes_v6 import convert_forced_nodes_to_v6

    old_when = now()
    when = old_when + datetime.timedelta(minutes=60)
    recipe = recipe_test_utils.create_recipe()
    job_1 = job_test_utils.create_job(num_exes=0, status='PENDING', last_status_change=old_when)
    job_2 = job_test_utils.create_job(num_exes=0, status='CANCELED', last_status_change=old_when,
                                      recipe=recipe)
    job_3 = job_test_utils.create_job(num_exes=1, status='CANCELED', last_status_change=old_when)
    job_4 = job_test_utils.create_job(num_exes=1, status='FAILED', last_status_change=old_when)
    job_ids = [job_1.id, job_2.id, job_3.id, job_4.id]
    recipe_test_utils.create_recipe_job(recipe=recipe, job=job_2)

    # Add jobs to message
    message = UncancelJobs()
    message.when = when
    for job_id in job_ids:
        if message.can_fit_more():
            message.add_job(job_id)

    # Expected forced-nodes payload for the update_recipe message
    forced_nodes = ForcedNodes()
    forced_nodes.set_all_nodes()
    forced_nodes_dict = convert_forced_nodes_to_v6(forced_nodes).get_dict()

    def _assert_job_states():
        """Only job 2 should ever change: uncanceled to PENDING at `when`."""
        jobs = Job.objects.filter(id__in=job_ids).order_by('id')
        # Job 1 should not be updated because it was not CANCELED
        self.assertEqual(jobs[0].status, 'PENDING')
        self.assertEqual(jobs[0].last_status_change, old_when)
        # Job 2 should be uncanceled; a repeated execution must not update it again
        self.assertEqual(jobs[1].status, 'PENDING')
        self.assertEqual(jobs[1].last_status_change, when)
        # Job 3 should not be updated since it has already been queued
        self.assertEqual(jobs[2].status, 'CANCELED')
        self.assertEqual(jobs[2].last_status_change, old_when)
        # Job 4 should not be updated because it was not CANCELED
        self.assertEqual(jobs[3].status, 'FAILED')
        self.assertEqual(jobs[3].last_status_change, old_when)

    def _assert_new_messages():
        """update_recipe and update_recipe_metrics messages should be created."""
        self.assertEqual(len(message.new_messages), 2)
        update_recipe_msg = None
        update_recipe_metrics_msg = None
        for msg in message.new_messages:
            if msg.type == 'update_recipe':
                update_recipe_msg = msg
            elif msg.type == 'update_recipe_metrics':
                update_recipe_metrics_msg = msg
        self.assertIsNotNone(update_recipe_msg)
        self.assertIsNotNone(update_recipe_metrics_msg)
        self.assertEqual(update_recipe_msg.root_recipe_id, recipe.id)
        self.assertDictEqual(convert_forced_nodes_to_v6(update_recipe_msg.forced_nodes).get_dict(),
                             forced_nodes_dict)
        self.assertListEqual(update_recipe_metrics_msg._recipe_ids, [recipe.id])

    # Execute message
    result = message.execute()
    self.assertTrue(result)
    _assert_job_states()
    _assert_new_messages()

    # Test executing message again with a later `when`; job 2 was already
    # uncanceled during the last message execution, so nothing changes
    newer_when = when + datetime.timedelta(minutes=60)
    message_json_dict = message.to_json()
    message = UncancelJobs.from_json(message_json_dict)
    message.when = newer_when
    result = message.execute()
    self.assertTrue(result)
    _assert_job_states()
    _assert_new_messages()
def setUp(self):
    """Creates three seed job types and nine RUNNING jobs whose
    last_status_change timestamps bound the expected duration ranges.
    """
    django.setup()

    manifest1 = job_test_utils.create_seed_manifest(name='type-1', jobVersion='1.0.0')
    self.job_type_1 = job_test_utils.create_seed_job_type(manifest=manifest1)
    manifest2 = job_test_utils.create_seed_manifest(name='type-2', jobVersion='2.0.0')
    self.job_type_2 = job_test_utils.create_seed_job_type(manifest=manifest2)
    manifest3 = job_test_utils.create_seed_manifest(name='type-1', jobVersion='2.0.0')
    self.job_type_3 = job_test_utils.create_seed_job_type(manifest=manifest3)

    def _stamp(seconds):
        # Aware UTC datetime from a POSIX timestamp.
        return datetime.datetime.utcfromtimestamp(seconds).replace(tzinfo=timezone.utc)

    self.entry_1_longest = _stamp(500000)
    self.entry_1_shortest = _stamp(650000)
    self.entry_2_longest = _stamp(600000)
    self.entry_2_shortest = _stamp(750000)
    self.entry_3_longest = _stamp(700000)
    self.entry_3_shortest = _stamp(800000)

    # (job type, last status change) for each RUNNING job, in creation order
    running_jobs = [
        (self.job_type_1, self.entry_1_longest),
        (self.job_type_1, self.entry_1_shortest),
        (self.job_type_2, self.entry_2_shortest),
        (self.job_type_2, self.entry_2_longest),
        (self.job_type_2, self.entry_2_shortest),
        (self.job_type_3, self.entry_3_shortest),
        (self.job_type_3, self.entry_3_longest),
        (self.job_type_3, self.entry_3_longest),
        (self.job_type_3, self.entry_3_shortest),
    ]
    for job_type, changed in running_jobs:
        job_test_utils.create_job(job_type=job_type, status='RUNNING',
                                  last_status_change=changed)
def test_calculate_stats(self):
    """Tests calculating individual statistics for a metrics entry."""
    # All jobs end on the same day that MetricsError aggregates over
    occurred = datetime.datetime(2015, 1, 1, tzinfo=utc)

    # Two failed jobs sharing one builtin error -> total_count of 2 for it
    error = error_test_utils.create_error(is_builtin=True)
    job1 = job_test_utils.create_job(error=error, status='FAILED', ended=occurred)
    job_test_utils.create_job_exe(
        job=job1, error=error, status=job1.status,
        queued=occurred,
        started=datetime.datetime(2015, 1, 1, 0, 10, 2, tzinfo=utc),
        ended=datetime.datetime(2015, 1, 1, 6, 0, 16, tzinfo=utc),
    )
    job2 = job_test_utils.create_job(error=error, status='FAILED', ended=occurred)
    job_test_utils.create_job_exe(
        job=job2, error=error, status=job2.status,
        queued=occurred,
        started=datetime.datetime(2015, 1, 1, 2, 10, 2, tzinfo=utc),
        ended=datetime.datetime(2015, 1, 1, 16, 0, 58, tzinfo=utc),
    )

    # One additional failed job per builtin error category -> total_count of 1 each
    for category in ('SYSTEM', 'DATA', 'ALGORITHM'):
        cat_error = error_test_utils.create_error(category=category, is_builtin=True)
        job = job_test_utils.create_job(error=cat_error, status='FAILED', ended=occurred)
        job_test_utils.create_job_exe(job=job, status=job.status, ended=job.ended,
                                      error=cat_error)

    MetricsError.objects.calculate(occurred)

    entries = MetricsError.objects.filter(occurred=occurred)
    self.assertEqual(len(entries), 4)
    for entry in entries:
        self.assertEqual(entry.occurred, occurred)
        if entry.error == error:
            self.assertEqual(entry.total_count, 2)
        else:
            self.assertEqual(entry.total_count, 1)
def test_type_revisions(self):
    """Tests calling /timeline/recipe-types/ filtered by recipe type names"""

    def _job_node(job_type):
        # v7 recipe-definition job node that consumes the recipe's input file.
        return {
            'dependencies': [],
            'input': {'INPUT_FILE': {'type': 'recipe', 'input': 'INPUT_FILE'}},
            'node_type': {
                'node_type': 'job',
                'job_type_name': job_type.name,
                'job_type_version': job_type.version,
                'job_type_revision': job_type.revision_num,
            },
        }

    def _input_interface():
        # Fresh input interface dict (single required PNG file).
        return {
            'files': [{'name': 'INPUT_FILE', 'media_types': ['image/png'],
                       'required': True, 'multiple': False}],
            'json': [],
        }

    # create recipe type (revision 1: one node)
    recipe_def = {'version': '7', 'input': _input_interface(),
                  'nodes': {'node_a': _job_node(self.job_type_1)}}
    rtype = recipe_test_utils.create_recipe_type_v6(name='revision-recipe',
                                                    definition=recipe_def)

    # revision 2: second node added
    recipe_def_v2 = {'version': '7', 'input': _input_interface(),
                     'nodes': {'node_a': _job_node(self.job_type_1),
                               'node_b': _job_node(self.job_type_2)}}
    recipe_test_utils.edit_recipe_type_v6(rtype, title='edited recipe',
                                          definition=recipe_def_v2, auto_update=False)
    rtype_edit = RecipeType.objects.get(id=rtype.id)

    # Create a recipe with two completed jobs for each day in the window
    for i in range(1, 7):
        date_1 = datetime.datetime(2020, 1, i, tzinfo=utc)
        date_2 = datetime.datetime(2020, 1, i + 1, tzinfo=utc)
        file_1 = storage_test_utils.create_file(workspace=self.workspace,
                                                file_size=104857600.0,
                                                source_started=date_1, source_ended=date_2)
        input_data = {'version': '1.0',
                      'input_data': [{'name': 'INPUT_FILE', 'file_id': file_1.id}]}
        # Recipe 1's jobs
        recipe_1 = recipe_test_utils.create_recipe(recipe_type=rtype_edit, input=input_data)
        job_1 = job_test_utils.create_job(job_type=self.job_type_1, status='COMPLETED',
                                          started=date_1, ended=date_1)
        job_1.recipe_id = recipe_1.id
        job_1.save()
        job_2 = job_test_utils.create_job(job_type=self.job_type_2, status='COMPLETED',
                                          started=date_2, ended=date_2)
        job_2.recipe_id = recipe_1.id
        job_2.save()

    started = '2020-01-01T00:00:00Z'
    ended = '2020-02-01T00:00:00Z'
    url = '/%s/timeline/recipe-types/?started=%s&ended=%s&id=%s&rev=%s' % (
        self.api, started, ended, rtype_edit.id, rtype_edit.revision_num)
    response = self.client.generic('GET', url)
    self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)

    result = json.loads(response.content)
    results = result['results']
    self.assertEqual(len(results), 1)
    self.assertEqual(results[0]['name'], rtype_edit.name)
    self.assertEqual(results[0]['revision_num'], rtype_edit.revision_num)
    self.assertEqual(results[0]['title'], rtype_edit.title)
def test_execute(self):
    """Tests calling RequeueJobsBulk.execute() successfully"""
    # Importing module here to patch the max batch size
    import queue.messages.requeue_jobs_bulk
    queue.messages.requeue_jobs_bulk.MAX_BATCH_SIZE = 5

    sys_err = error_test_utils.create_error(category='SYSTEM')
    data = JobData()
    job_type = job_test_utils.create_seed_job_type()

    def _failed_job(**overrides):
        # Failed job with input data; individual fields overridable per call.
        kwargs = dict(job_type=job_type, num_exes=3, status='FAILED', error=sys_err,
                      input=data.get_dict())
        kwargs.update(overrides)
        return job_test_utils.create_job(**kwargs)

    job_1 = _failed_job()
    job_2 = _failed_job()
    job_3 = job_test_utils.create_job(job_type=job_type, num_exes=0, status='FAILED',
                                      error=sys_err)
    job_4 = _failed_job()
    job_5 = _failed_job(status='CANCELED')
    job_6 = _failed_job()
    job_7 = _failed_job()

    # Create message
    message = queue.messages.requeue_jobs_bulk.RequeueJobsBulk()
    message.error_ids = [sys_err.id]
    message.job_type_ids = [job_type.id]
    message.priority = 10001
    message.status = 'FAILED'

    # Job 5 is skipped due to CANCELED and job 3 has not been queued yet (forced illegal state)
    expected_jobs = [QueuedJob(job_7.id, job_7.num_exes), QueuedJob(job_6.id, job_6.num_exes),
                     QueuedJob(job_4.id, job_4.num_exes), QueuedJob(job_2.id, job_2.num_exes)]

    def _check_messages():
        # Should be two messages, one for next bulk re-queue and one for re-queuing
        # the specific jobs
        self.assertEqual(len(message.new_messages), 2)
        requeue_bulk_message = message.new_messages[0]
        requeue_message = message.new_messages[1]
        self.assertEqual(requeue_bulk_message.type, 'requeue_jobs_bulk')
        self.assertEqual(requeue_bulk_message.current_job_id, job_2.id)
        self.assertEqual(requeue_message.type, 'requeue_jobs')
        self.assertListEqual(requeue_message._requeue_jobs, expected_jobs)
        self.assertEqual(requeue_message.priority, 10001)

    # Execute message
    self.assertTrue(message.execute())
    _check_messages()

    # Test executing message again; should have same messages returned
    message.new_messages = []
    self.assertTrue(message.execute())
    _check_messages()
def test_get_nodes_to_process_input(self):
    """Tests calling Recipe.get_nodes_to_process_input()"""
    data_dict = convert_data_to_v6_json(Data()).get_dict()
    job_type = job_test_utils.create_job_type()
    sub_recipe_type = recipe_test_utils.create_recipe_type()

    # Create recipe definition: jobs A/E/F, conditions B/C/D (D always rejects),
    # sub-recipes G/H, wired by the dependency pairs below
    definition = RecipeDefinition(Interface())
    definition.add_job_node('A', job_type.name, job_type.version, job_type.revision_num)
    definition.add_condition_node('B', Interface(), DataFilter(True))
    definition.add_condition_node('C', Interface(), DataFilter(True))
    definition.add_condition_node('D', Interface(), DataFilter(False))
    definition.add_job_node('E', job_type.name, job_type.version, job_type.revision_num)
    definition.add_job_node('F', job_type.name, job_type.version, job_type.revision_num)
    definition.add_recipe_node('G', sub_recipe_type.name, sub_recipe_type.revision_num)
    definition.add_recipe_node('H', sub_recipe_type.name, sub_recipe_type.revision_num)
    for parent, child in (('A', 'D'), ('A', 'E'), ('B', 'E'), ('B', 'F'),
                          ('C', 'F'), ('D', 'G'), ('E', 'G'), ('E', 'H')):
        definition.add_dependency(parent, child)
    definition_json_dict = convert_recipe_definition_to_v6_json(definition).get_dict()
    recipe_type = recipe_test_utils.create_recipe_type(definition=definition_json_dict)
    recipe = recipe_test_utils.create_recipe(recipe_type=recipe_type, input=data_dict)

    # Nodes A, B, and D already exist
    job_a = job_test_utils.create_job(job_type=job_type, status='COMPLETED',
                                      input=data_dict, output=data_dict, save=True)
    condition_b = recipe_test_utils.create_recipe_condition(is_processed=True,
                                                            is_accepted=True, save=False)
    condition_d = recipe_test_utils.create_recipe_condition(is_processed=True,
                                                            is_accepted=False, save=False)
    RecipeCondition.objects.bulk_create([condition_b, condition_d])
    node_a = recipe_test_utils.create_recipe_node(recipe=recipe, node_name='A', job=job_a,
                                                  save=False)
    node_b = recipe_test_utils.create_recipe_node(recipe=recipe, node_name='B',
                                                  condition=condition_b, save=False)
    node_d = recipe_test_utils.create_recipe_node(recipe=recipe, node_name='D',
                                                  condition=condition_d, save=False)
    RecipeNode.objects.bulk_create([node_a, node_b, node_d])

    recipe_instance = Recipe.objects.get_recipe_instance(recipe.id)
    nodes_to_process = recipe_instance.get_nodes_to_process_input()
    self.assertSetEqual(set(nodes_to_process.keys()), {'C', 'E'})
def test_execute_with_recipe_legacy(self):
    """Tests calling ProcessRecipeInput.execute() successfully when a legacy
    sub-recipe has to get its data from its recipe
    """
    workspace = storage_test_utils.create_workspace()
    # Four input files; file_1 feeds input_a, files 2-4 feed input_b
    file_1 = storage_test_utils.create_file(workspace=workspace, file_size=104857600.0)
    file_2 = storage_test_utils.create_file(workspace=workspace, file_size=987654321.0)
    file_3 = storage_test_utils.create_file(workspace=workspace, file_size=65456.0)
    file_4 = storage_test_utils.create_file(workspace=workspace, file_size=24564165456.0)

    # Job type A: no inputs, produces a single 'output_a' file
    manifest_a = {
        'seedVersion': '1.0.0',
        'job': {
            'name': 'job-a',
            'jobVersion': '1.0.0',
            'packageVersion': '1.0.0',
            'title': '',
            'description': '',
            'maintainer': {
                'name': 'John Doe',
                'email': '*****@*****.**'
            },
            'timeout': 10,
            'interface': {
                'command': '',
                'inputs': {
                    'files': [],
                    'json': []
                },
                'outputs': {
                    'files': [{
                        'name': 'output_a',
                        'pattern': '*.png'
                    }]
                }
            }
        }
    }
    job_type_a = job_test_utils.create_job_type(interface=manifest_a)
    output_data_a = Data()
    output_data_a.add_value(FileValue('output_a', [file_1.id]))
    output_data_a_dict = convert_data_to_v6_json(output_data_a).get_dict()

    # Job type B: no inputs, produces multiple 'output_b' files
    manifest_b = {
        'seedVersion': '1.0.0',
        'job': {
            'name': 'job-b',
            'jobVersion': '1.0.0',
            'packageVersion': '1.0.0',
            'title': '',
            'description': '',
            'maintainer': {
                'name': 'John Doe',
                'email': '*****@*****.**'
            },
            'timeout': 10,
            'interface': {
                'command': '',
                'inputs': {
                    'files': [],
                    'json': []
                },
                'outputs': {
                    'files': [{
                        'name': 'output_b',
                        'pattern': '*.png',
                        'multiple': True
                    }]
                }
            }
        }
    }
    job_type_b = job_test_utils.create_job_type(interface=manifest_b)
    output_data_b = Data()
    output_data_b.add_value(FileValue('output_b', [file_2.id, file_3.id, file_4.id]))
    output_data_b_dict = convert_data_to_v6_json(output_data_b).get_dict()

    # Completed jobs whose outputs the sub-recipe input will be built from
    job_a = job_test_utils.create_job(job_type=job_type_a, num_exes=1, status='COMPLETED',
                                      output=output_data_a_dict)
    job_b = job_test_utils.create_job(job_type=job_type_b, num_exes=1, status='COMPLETED',
                                      output=output_data_b_dict)

    # Sub-recipe type C with a legacy (v1) definition: one single-file input and
    # one multiple-file input
    sub_recipe_interface_c = Interface()
    sub_recipe_interface_c.add_parameter(FileParameter('input_a', ['image/png']))
    sub_recipe_interface_c.add_parameter(FileParameter('input_b', ['image/png'], multiple=True))
    sub_recipe_def_c = RecipeDefinition(sub_recipe_interface_c)
    sub_recipe_def_dict_c = convert_recipe_definition_to_v1_json(sub_recipe_def_c).get_dict()
    sub_recipe_type_c = recipe_test_utils.create_recipe_type(definition=sub_recipe_def_dict_c)
    sub_recipe_c = recipe_test_utils.create_recipe(recipe_type=sub_recipe_type_c)

    # Parent recipe wires node_a's output into node_c's input_a and node_b's
    # output into node_c's input_b
    definition = RecipeDefinition(Interface())
    definition.add_job_node('node_a', job_type_a.name, job_type_a.version,
                            job_type_a.revision_num)
    definition.add_job_node('node_b', job_type_b.name, job_type_b.version,
                            job_type_b.revision_num)
    definition.add_recipe_node('node_c', sub_recipe_type_c.name, sub_recipe_type_c.revision_num)
    definition.add_dependency('node_c', 'node_a')
    definition.add_dependency_input_connection('node_c', 'input_a', 'node_a', 'output_a')
    definition.add_dependency('node_c', 'node_b')
    definition.add_dependency_input_connection('node_c', 'input_b', 'node_b', 'output_b')
    def_dict = convert_recipe_definition_to_v6_json(definition).get_dict()
    recipe_type = recipe_test_utils.create_recipe_type(definition=def_dict)
    recipe_data_dict = {
        'version': '1.0',
        'input_data': [],
        'workspace_id': workspace.id
    }
    recipe = recipe_test_utils.create_recipe(recipe_type=recipe_type, input=recipe_data_dict)
    recipe_node_a = recipe_test_utils.create_recipe_node(recipe=recipe, node_name='node_a',
                                                         job=job_a)
    recipe_node_b = recipe_test_utils.create_recipe_node(recipe=recipe, node_name='node_b',
                                                         job=job_b)
    recipe_node_c = recipe_test_utils.create_recipe_node(recipe=recipe, node_name='node_c',
                                                         sub_recipe=sub_recipe_c)
    RecipeNode.objects.bulk_create([recipe_node_a, recipe_node_b, recipe_node_c])
    job_a.recipe = recipe
    job_a.save()
    job_b.recipe = recipe
    job_b.save()
    sub_recipe_c.recipe = recipe
    sub_recipe_c.save()

    # Create message
    message = ProcessRecipeInput()
    message.recipe_id = sub_recipe_c.id

    # Execute message
    result = message.execute()
    self.assertTrue(result)
    sub_recipe_c = Recipe.objects.get(id=sub_recipe_c.id)

    # Check for update_recipe message
    self.assertEqual(len(message.new_messages), 1)
    self.assertEqual(message.new_messages[0].type, 'update_recipe')

    # Check sub-recipe for expected input_file_size
    # NOTE(review): 24469.0 presumably is the summed file sizes converted to MiB
    # by the input-size calculation — confirm against the model code
    self.assertEqual(sub_recipe_c.input_file_size, 24469.0)
    # Check sub-recipe for expected input data
    self.assertEqual(sub_recipe_c.input['version'], '1.0')
    # Should be legacy input data with workspace ID
    self.assertEqual(sub_recipe_c.input['workspace_id'], workspace.id)
    self.assertSetEqual(set(sub_recipe_c.get_input_data().values.keys()),
                        {'input_a', 'input_b'})
    self.assertListEqual(sub_recipe_c.get_input_data().values['input_a'].file_ids,
                         [file_1.id])
    self.assertListEqual(sub_recipe_c.get_input_data().values['input_b'].file_ids,
                         [file_2.id, file_3.id, file_4.id])

    # Make sure sub-recipe input file models are created
    input_files = RecipeInputFile.objects.filter(recipe_id=sub_recipe_c.id)
    self.assertEqual(len(input_files), 4)
    file_ids = {input_file.input_file_id for input_file in input_files}
    self.assertSetEqual(file_ids, {file_1.id, file_2.id, file_3.id, file_4.id})

    # Test executing message again (round-tripped through JSON)
    message_json_dict = message.to_json()
    message = ProcessRecipeInput.from_json(message_json_dict)
    result = message.execute()
    self.assertTrue(result)

    # Still should have update_recipe message
    self.assertEqual(len(message.new_messages), 1)
    self.assertEqual(message.new_messages[0].type, 'update_recipe')

    # Make sure recipe input file models are unchanged
    input_files = RecipeInputFile.objects.filter(recipe_id=sub_recipe_c.id)
    self.assertEqual(len(input_files), 4)
def test_execute_recipe_superseded(self):
    """Tests calling CreateJobs.execute() successfully for jobs within a recipe
    that supersedes another recipe.

    The full verification is extracted into ``_verify`` because execute() must be
    idempotent: re-executing the message from its JSON form has to yield exactly
    the same jobs and follow-on messages.
    """
    from batch.test import utils as batch_test_utils
    from recipe.models import RecipeNode
    from recipe.test import utils as recipe_test_utils

    batch = batch_test_utils.create_batch()
    event = trigger_test_utils.create_trigger_event()
    job_type_1 = job_test_utils.create_seed_job_type()
    job_type_2 = job_test_utils.create_seed_job_type()
    superseded_recipe = recipe_test_utils.create_recipe()
    superseded_job_1 = job_test_utils.create_job(job_type=job_type_1, is_superseded=True)
    superseded_job_2 = job_test_utils.create_job(job_type=job_type_2, is_superseded=True)
    recipe_test_utils.create_recipe_node(recipe=superseded_recipe, node_name='node_1',
                                         job=superseded_job_1, save=True)
    recipe_test_utils.create_recipe_node(recipe=superseded_recipe, node_name='node_2',
                                         job=superseded_job_2, save=True)
    recipe = recipe_test_utils.create_recipe(recipe_type=superseded_recipe.recipe_type,
                                             superseded_recipe=superseded_recipe, event=event,
                                             batch=batch)
    recipe_jobs = [
        RecipeJob(job_type_1.name, job_type_1.version, job_type_1.revision_num, 'node_1',
                  False),
        RecipeJob(job_type_2.name, job_type_2.version, job_type_2.revision_num, 'node_2',
                  True),
    ]

    def _verify(message):
        """Asserts the recreated jobs and the messages produced by `message`."""
        self.assertEqual(Job.objects.filter(recipe_id=recipe.id,
                                            event_id=recipe.event_id).count(), 2)
        recipe_nodes = RecipeNode.objects.select_related('job').filter(recipe_id=recipe.id)
        self.assertEqual(len(recipe_nodes), 2)
        job_2 = None  # node_2's job, needed for the process_job_input check below
        for recipe_node in recipe_nodes:
            if recipe_node.node_name == 'node_1':
                job_1 = recipe_node.job
                self.assertEqual(job_1.job_type_id, job_type_1.id)
                self.assertEqual(job_1.event_id, event.id)
                self.assertEqual(job_1.batch_id, batch.id)
                self.assertEqual(job_1.recipe_id, recipe.id)
                self.assertEqual(job_1.root_recipe_id, superseded_recipe.id)
                self.assertEqual(job_1.superseded_job_id, superseded_job_1.id)
                self.assertEqual(job_1.root_superseded_job_id, superseded_job_1.id)
            elif recipe_node.node_name == 'node_2':
                job_2 = recipe_node.job
                self.assertEqual(job_2.job_type_id, job_type_2.id)
                self.assertEqual(job_2.event_id, event.id)
                self.assertEqual(job_2.batch_id, batch.id)
                self.assertEqual(job_2.recipe_id, recipe.id)
                self.assertEqual(job_2.root_recipe_id, superseded_recipe.id)
                self.assertEqual(job_2.superseded_job_id, superseded_job_2.id)
                self.assertEqual(job_2.root_superseded_job_id, superseded_job_2.id)
            else:
                self.fail('%s is the wrong node name' % recipe_node.node_name)

        # Should be two messages, one for processing job input and one for updating
        # metrics for the recipe
        self.assertEqual(len(message.new_messages), 2)
        process_job_input_msg = None
        update_metrics_msg = None
        for msg in message.new_messages:
            if msg.type == 'process_job_input':
                process_job_input_msg = msg
            elif msg.type == 'update_recipe_metrics':
                update_metrics_msg = msg
        self.assertIsNotNone(process_job_input_msg)
        self.assertIsNotNone(update_metrics_msg)
        # Check message to process job input for new job 2
        self.assertEqual(process_job_input_msg.job_id, job_2.id)
        # Check message to update recipe metrics for the recipe containing the new jobs
        self.assertListEqual(update_metrics_msg._recipe_ids, [recipe.id])

    # Create and execute message
    message = create_jobs_messages_for_recipe(recipe, recipe_jobs)[0]
    self.assertTrue(message.execute())
    _verify(message)

    # Test executing message again - results must be identical
    message_json_dict = message.to_json()
    message = CreateJobs.from_json(message_json_dict)
    self.assertTrue(message.execute())
    _verify(message)