예제 #1
0
    def test_json(self):
        """Tests converting a RequeueJobsBulk message to and from JSON"""

        system_error = error_test_utils.create_error(category='SYSTEM')

        job_data = JobData()
        job_type = job_test_utils.create_job_type()
        failed_job = job_test_utils.create_job(job_type=job_type, num_exes=3, status='FAILED',
                                               error=system_error, input=job_data.get_dict())
        canceled_job = job_test_utils.create_job(job_type=job_type, num_exes=3, status='CANCELED',
                                                 error=system_error, input=job_data.get_dict())

        # Build the bulk re-queue message with every filter pointing at the failed job
        message = RequeueJobsBulk()
        message.started = failed_job.last_modified - timedelta(seconds=1)
        message.ended = failed_job.last_modified + timedelta(seconds=1)
        message.error_categories = ['SYSTEM']
        message.error_ids = [system_error.id]
        message.job_ids = [failed_job.id]
        message.job_type_ids = [job_type.id]
        message.priority = 1
        message.status = 'FAILED'

        # Round-trip the message through JSON and execute the result
        round_trip_message = RequeueJobsBulk.from_json(message.to_json())
        self.assertTrue(round_trip_message.execute())

        # Only the failed job should produce a re-queue message; the canceled job
        # is excluded by the status filter
        self.assertEqual(len(round_trip_message.new_messages), 1)
        requeue_msg = round_trip_message.new_messages[0]
        self.assertEqual(requeue_msg.type, 'requeue_jobs')
        self.assertListEqual(requeue_msg._requeue_jobs, [QueuedJob(failed_job.id, failed_job.num_exes)])
        self.assertEqual(requeue_msg.priority, 1)
예제 #2
0
    def test_json(self):
        """Tests converting a QueuedJobs message to and from JSON"""

        job_data = JobData()
        pending_job = job_test_utils.create_job(num_exes=0, status='PENDING', input=job_data.get_dict())
        failed_job = job_test_utils.create_job(num_exes=1, status='FAILED', input=job_data.get_dict())
        completed_job = job_test_utils.create_job(num_exes=1, status='COMPLETED', input=job_data.get_dict())
        canceled_job = job_test_utils.create_job(num_exes=0, status='CANCELED', input=job_data.get_dict())
        job_ids = [pending_job.id, failed_job.id, completed_job.id, canceled_job.id]

        # Populate the message, deliberately giving the failed job a stale exe_num
        message = QueuedJobs()
        message.priority = 1
        jobs_to_add = [(pending_job.id, pending_job.num_exes),
                       (failed_job.id, failed_job.num_exes - 1),  # Mismatched exe_num
                       (completed_job.id, completed_job.num_exes),
                       (canceled_job.id, canceled_job.num_exes)]
        for job_id, exe_num in jobs_to_add:
            if message.can_fit_more():
                message.add_job(job_id, exe_num)

        # Round-trip the message through JSON and execute the result
        round_trip_message = QueuedJobs.from_json(message.to_json())
        self.assertTrue(round_trip_message.execute())

        jobs = Job.objects.filter(id__in=job_ids).order_by('id')
        # Only the pending job is eligible to be queued; the others keep their status
        self.assertEqual(jobs[0].status, 'QUEUED')
        self.assertEqual(jobs[0].num_exes, 1)
        self.assertEqual(jobs[1].status, 'FAILED')
        self.assertEqual(jobs[1].num_exes, 1)
        self.assertEqual(jobs[2].status, 'COMPLETED')
        self.assertEqual(jobs[2].num_exes, 1)
        self.assertEqual(jobs[3].status, 'CANCELED')
        self.assertEqual(jobs[3].num_exes, 0)
        # Ensure priority is correctly set
        queue_entry = Queue.objects.get(job_id=pending_job.id)
        self.assertEqual(queue_entry.priority, 1)
예제 #3
0
    def test_json(self):
        """Tests converting a CancelJobsBulk message to and from JSON"""

        system_error = error_test_utils.create_error(category='SYSTEM')

        job_data = JobData()
        batch = batch_test_utils.create_batch()
        recipe = recipe_test_utils.create_recipe()
        job_type = job_test_utils.create_job_type()
        job_in_recipe = job_test_utils.create_job(job_type=job_type, num_exes=3, status='FAILED',
                                                  error=system_error, input=job_data.get_dict())
        job_in_recipe.batch_id = batch.id
        job_in_recipe.recipe_id = recipe.id
        job_in_recipe.save()
        other_job = job_test_utils.create_job(job_type=job_type, num_exes=3, status='FAILED',
                                              error=system_error, input=job_data.get_dict())

        # Build the bulk cancel message with every supported filter
        message = CancelJobsBulk()
        message.started = job_in_recipe.last_modified - timedelta(seconds=1)
        message.ended = job_in_recipe.last_modified + timedelta(seconds=1)
        message.error_categories = ['SYSTEM']
        message.error_ids = [system_error.id]
        message.job_ids = [job_in_recipe.id]
        message.job_type_ids = [job_type.id]
        message.status = 'FAILED'
        message.job_type_names = [job_type.name]
        message.batch_ids = [batch.id]
        message.recipe_ids = [recipe.id]
        message.is_superseded = False

        # Round-trip the message through JSON and execute the result
        round_trip_message = CancelJobsBulk.from_json(message.to_json())
        self.assertTrue(round_trip_message.execute())

        # Only the first job matches all of the filters, so a single cancel message
        self.assertEqual(len(round_trip_message.new_messages), 1)
        cancel_msg = round_trip_message.new_messages[0]
        self.assertEqual(cancel_msg.type, 'cancel_jobs')
        self.assertListEqual(cancel_msg._job_ids, [job_in_recipe.id])
예제 #4
0
    def test_json(self):
        """Tests converting a CancelJobs message to and from JSON"""

        when = now()
        job_data = JobData()
        job_type = job_test_utils.create_seed_job_type()
        pending_job = job_test_utils.create_job(job_type=job_type, status='PENDING')
        failed_job = job_test_utils.create_job(job_type=job_type, num_exes=3, status='FAILED',
                                               input=job_data.get_dict())
        job_ids = [pending_job.id, failed_job.id]

        # Populate the message with both jobs
        message = CancelJobs()
        message.when = when
        for job_id in job_ids:
            if message.can_fit_more():
                message.add_job(job_id)

        # Round-trip the message through JSON and execute the result
        round_trip_message = CancelJobs.from_json(message.to_json())
        self.assertTrue(round_trip_message.execute())

        # Both jobs should have been canceled at the given time
        for job in Job.objects.filter(id__in=job_ids).order_by('id'):
            self.assertEqual(job.status, 'CANCELED')
            self.assertEqual(job.last_status_change, when)
        # No new messages since these jobs do not belong to a recipe
        self.assertEqual(len(round_trip_message.new_messages), 0)
예제 #5
0
파일: parse_rule.py 프로젝트: Carl4/scale
    def process_parse(self, source_file):
        '''Processes the given source file parse by creating the appropriate jobs if the rule is triggered. All
        database changes are made in an atomic transaction.

        :param source_file: The source file that was parsed
        :type source_file: :class:`source.models.SourceFile`
        '''

        # If this parse file has the correct media type or the correct data types, the rule is triggered
        # An unset filter (no media type / no data types) matches everything;
        # `<=` is a subset check: every required data type must be tagged on the file
        media_type_match = not self._media_type or self._media_type == source_file.media_type
        data_types_match = not self._data_types or self._data_types <= source_file.get_data_type_tags()

        if not media_type_match or not data_types_match:
            return

        # Log which filters caused the rule to fire
        msg = 'Parse rule for '
        if not self._media_type:
            msg += 'all media types '
        else:
            msg += 'media type %s ' % self._media_type
        if self._data_types:
            msg += 'and data types %s ' % ','.join(self._data_types)
        msg += 'was triggered'
        logger.info(msg)

        # Record the trigger event before queuing any work
        event = ParseTriggerEvent(self._model, source_file).save_to_db()

        # Create triggered jobs
        for job in self._jobs_to_create:
            job_type = self._job_type_map[(job['job_type']['name'], job['job_type']['version'])]
            file_input_name = job['file_input_name']
            job_data = JobData({})
            job_data.add_file_input(file_input_name, source_file.id)

            # If workspace name has been provided, add that to the job data for each output file
            if 'workspace_name' in job:
                workspace = self._workspace_map[job['workspace_name']]
                job_type.get_job_interface().add_workspace_to_data(job_data, workspace.id)
            logger.info('Queuing new job of type %s %s', job_type.name, job_type.version)
            Queue.objects.queue_new_job(job_type, job_data.get_dict(), event)

        # Create triggered recipes
        for recipe in self._recipes_to_create:
            recipe_type = self._recipe_type_map[(recipe['recipe_type']['name'], recipe['recipe_type']['version'])]
            file_input_name = recipe['file_input_name']
            recipe_data = RecipeData({})
            recipe_data.add_file_input(file_input_name, source_file.id)

            # If workspace name has been provided, add that to the recipe data for each output file
            if 'workspace_name' in recipe:
                workspace = self._workspace_map[recipe['workspace_name']]
                recipe_data.set_workspace_id(workspace.id)
            logger.info('Queuing new recipe of type %s %s', recipe_type.name, recipe_type.version)
            Queue.objects.queue_new_recipe(recipe_type, recipe_data.get_dict(), event)
예제 #6
0
    def test_json(self):
        """Tests converting a RequeueJobs message to and from JSON"""

        job_data = JobData()
        job_type = job_test_utils.create_seed_job_type(max_tries=3)
        good_job = job_test_utils.create_job(job_type=job_type, num_exes=3, status='FAILED',
                                             input=job_data.get_dict())
        stale_job = job_test_utils.create_job(job_type=job_type, num_exes=3, status='FAILED',
                                              input=job_data.get_dict())
        job_ids = [good_job.id, stale_job.id]

        # Populate the message, deliberately giving the second job a stale exe_num
        message = RequeueJobs()
        message.priority = 1
        if message.can_fit_more():
            message.add_job(good_job.id, good_job.num_exes)
        if message.can_fit_more():
            message.add_job(stale_job.id, stale_job.num_exes - 1)  # Mismatched exe_num

        # Round-trip the message through JSON and execute the result
        round_trip_message = RequeueJobs.from_json(message.to_json())
        self.assertTrue(round_trip_message.execute())

        jobs = Job.objects.filter(id__in=job_ids).order_by('id')
        # Job 1 should have been good to re-queue (max_tries bumped); job 2 should
        # have been skipped because of the mismatched exe_num
        self.assertEqual(jobs[0].max_tries, 6)
        self.assertEqual(jobs[1].max_tries, 3)
        self.assertEqual(len(round_trip_message.new_messages), 1)
        queued_msg = round_trip_message.new_messages[0]
        self.assertEqual(queued_msg.type, 'queued_jobs')
        self.assertListEqual(queued_msg._queued_jobs, [QueuedJob(good_job.id, good_job.num_exes)])
        self.assertTrue(queued_msg.requeue)
        self.assertEqual(queued_msg.priority, 1)
예제 #7
0
    def process_parsed_source_file(self, source_file):
        '''Processes the given parsed source file by checking it against all parse trigger rules and creating the
        corresponding jobs and recipes for any triggered rules. All database changes are made in an atomic transaction.

        :param source_file: The source file that was parsed
        :type source_file: :class:`source.models.SourceFile`
        '''

        logger.info('Processing trigger rules for parsed source file with media type %s and data types %s',
                    source_file.media_type, str(list(source_file.get_data_type_tags())))

        triggered_any = False
        for entry in RecipeType.objects.get_active_trigger_rules(PARSE_TYPE):
            rule = entry[0]
            thing_to_create = entry[1]
            rule_config = rule.get_configuration()
            condition = rule_config.get_condition()

            # Skip rules whose trigger condition does not match this file
            if not condition.is_condition_met(source_file):
                continue

            logger.info(condition.get_triggered_message())
            triggered_any = True

            event = self._create_parse_trigger_event(source_file, rule)
            workspace = Workspace.objects.get(name=rule_config.get_workspace_name())

            if isinstance(thing_to_create, JobType):
                # Queue a single job fed by the parsed source file
                job_type = thing_to_create
                job_data = JobData({})
                job_data.add_file_input(rule_config.get_input_data_name(), source_file.id)
                job_type.get_job_interface().add_workspace_to_data(job_data, workspace.id)
                logger.info('Queuing new job of type %s %s', job_type.name, job_type.version)
                Queue.objects.queue_new_job(job_type, job_data.get_dict(), event)
            elif isinstance(thing_to_create, RecipeType):
                # Queue a recipe fed by the parsed source file
                recipe_type = thing_to_create
                recipe_data = RecipeData({})
                recipe_data.add_file_input(rule_config.get_input_data_name(), source_file.id)
                recipe_data.set_workspace_id(workspace.id)
                logger.info('Queuing new recipe of type %s %s', recipe_type.name, recipe_type.version)
                Queue.objects.queue_new_recipe(recipe_type, recipe_data.get_dict(), event)

        if not triggered_any:
            logger.info('No rules triggered')
예제 #8
0
파일: ingest_rule.py 프로젝트: Carl4/scale
    def process_ingest(self, ingest, source_file_id):
        """Processes the given source file ingest by creating the appropriate jobs if the rule is triggered. All
        database changes are made in an atomic transaction.

        :param ingest: The ingest to process
        :type ingest: :class:`ingest.models.Ingest`
        :param source_file_id: The ID of the source file that was ingested
        :type source_file_id: long
        """

        # The rule only fires when both the media type and the data types match
        if self._media_type and self._media_type != ingest.media_type:
            return
        if not self._data_types.issubset(ingest.get_data_type_tags()):
            return

        if self._media_type:
            logger.info("Ingest rule for media type %s was triggered", self._media_type)
        else:
            logger.info("Ingest rule for all media types was triggered")
        event = IngestTriggerEvent(self._model, ingest).save_to_db()

        # Queue a job for each job configuration attached to this rule
        for job_config in self._jobs_to_create:
            job_type = self._job_type_map[(job_config["job_type"]["name"], job_config["job_type"]["version"])]
            job_data = JobData({})
            job_data.add_file_input(job_config["file_input_name"], source_file_id)

            # An optional workspace name tells the job where to place its output files
            if "workspace_name" in job_config:
                workspace = self._workspace_map[job_config["workspace_name"]]
                job_type.get_job_interface().add_workspace_to_data(job_data, workspace.id)
            logger.info("Queuing new job of type %s %s", job_type.name, job_type.version)
            Queue.objects.queue_new_job(job_type, job_data.get_dict(), event)

        # Queue a recipe for each recipe configuration attached to this rule
        for recipe_config in self._recipes_to_create:
            recipe_type = self._recipe_type_map[(recipe_config["recipe_type"]["name"],
                                                 recipe_config["recipe_type"]["version"])]
            recipe_data = RecipeData({})
            recipe_data.add_file_input(recipe_config["file_input_name"], source_file_id)

            # An optional workspace name tells the recipe where to place its output files
            if "workspace_name" in recipe_config:
                workspace = self._workspace_map[recipe_config["workspace_name"]]
                recipe_data.set_workspace_id(workspace.id)
            logger.info("Queuing new recipe of type %s %s", recipe_type.name, recipe_type.version)
            Queue.objects.queue_new_recipe(recipe_type, recipe_data.get_dict(), event)
예제 #9
0
    def test_execute_canceled(self):
        """Tests calling RequeueJobsBulk.execute() successfully to requeue canceled jobs"""

        job_data = JobData()
        job_type = job_test_utils.create_job_type()
        job_with_exes = job_test_utils.create_job(job_type=job_type, num_exes=3, status='CANCELED',
                                                  input=job_data.get_dict())
        job_without_exes = job_test_utils.create_job(job_type=job_type, num_exes=0, status='CANCELED')

        # Build a bulk re-queue message that targets the job type
        message = RequeueJobsBulk()
        message.job_type_ids = [job_type.id]
        message.priority = 10001

        def check_new_messages():
            # A single re-queue message should cover both canceled jobs
            self.assertEqual(len(message.new_messages), 1)
            requeue_message = message.new_messages[0]
            self.assertEqual(requeue_message.type, 'requeue_jobs')
            self.assertListEqual(requeue_message._requeue_jobs,
                                 [QueuedJob(job_without_exes.id, job_without_exes.num_exes),
                                  QueuedJob(job_with_exes.id, job_with_exes.num_exes)])
            self.assertEqual(requeue_message.priority, 10001)

        # Execute message
        self.assertTrue(message.execute())
        check_new_messages()

        # Executing the message again should yield the same new message
        message.new_messages = []
        self.assertTrue(message.execute())
        check_new_messages()
예제 #10
0
    def process_parsed_source_file(self, source_file):
        '''Processes the given parsed source file by checking it against all parse trigger rules and creating the
        corresponding jobs and recipes for any triggered rules. All database changes are made in an atomic transaction.

        :param source_file: The source file that was parsed
        :type source_file: :class:`source.models.SourceFile`
        '''

        msg = 'Processing trigger rules for parsed source file with media type %s and data types %s'
        logger.info(msg, source_file.media_type, str(list(source_file.get_data_type_tags())))

        num_triggered = 0
        for entry in RecipeType.objects.get_active_trigger_rules(PARSE_TYPE):
            rule = entry[0]
            thing_to_create = entry[1]
            rule_config = rule.get_configuration()
            condition = rule_config.get_condition()

            # Only act on rules whose trigger condition matches this file
            if not condition.is_condition_met(source_file):
                continue

            logger.info(condition.get_triggered_message())
            num_triggered += 1

            event = self._create_parse_trigger_event(source_file, rule)
            workspace = Workspace.objects.get(name=rule_config.get_workspace_name())

            if isinstance(thing_to_create, JobType):
                # The rule creates a job: wire the source file in as its input
                job_type = thing_to_create
                job_data = JobData({})
                job_data.add_file_input(rule_config.get_input_data_name(), source_file.id)
                job_type.get_job_interface().add_workspace_to_data(job_data, workspace.id)
                logger.info('Queuing new job of type %s %s', job_type.name, job_type.version)
                Queue.objects.queue_new_job(job_type, job_data.get_dict(), event)
            elif isinstance(thing_to_create, RecipeType):
                # The rule creates a recipe: wire the source file in as its input
                recipe_type = thing_to_create
                recipe_data = RecipeData({})
                recipe_data.add_file_input(rule_config.get_input_data_name(), source_file.id)
                recipe_data.set_workspace_id(workspace.id)
                logger.info('Queuing new recipe of type %s %s', recipe_type.name, recipe_type.version)
                Queue.objects.queue_new_recipe(recipe_type, recipe_data.get_dict(), event)

        if num_triggered == 0:
            logger.info('No rules triggered')
예제 #11
0
    def test_execute(self):
        """Tests calling FailedJobs.execute() successfully

        Covers a retryable and a non-retryable error, exhausted max_tries, a
        mismatched exe_num, and jobs already in FAILED/CANCELED status, then
        executes the same message a second time to verify idempotent behavior.
        """

        error_1 = error_test_utils.create_error(should_be_retried=True)
        error_2 = error_test_utils.create_error(should_be_retried=False)

        data = JobData()
        job_1 = job_test_utils.create_job(num_exes=1,
                                          status='QUEUED',
                                          data=data.get_dict(),
                                          max_tries=2)
        job_2 = job_test_utils.create_job(num_exes=1,
                                          status='RUNNING',
                                          data=data.get_dict(),
                                          max_tries=2)
        job_3 = job_test_utils.create_job(num_exes=1,
                                          status='RUNNING',
                                          data=data.get_dict(),
                                          max_tries=1)
        job_4 = job_test_utils.create_job(num_exes=1,
                                          status='RUNNING',
                                          data=data.get_dict(),
                                          max_tries=2)
        job_5 = job_test_utils.create_job(num_exes=1,
                                          status='RUNNING',
                                          data=data.get_dict(),
                                          max_tries=2)
        job_6 = job_test_utils.create_job(num_exes=1,
                                          status='FAILED',
                                          data=data.get_dict(),
                                          max_tries=2)
        job_7 = job_test_utils.create_job(num_exes=0, status='CANCELED')
        job_ids = [
            job_1.id, job_2.id, job_3.id, job_4.id, job_5.id, job_6.id,
            job_7.id
        ]

        # Jobs 3 and 4 belong to recipes, so failing them must trigger recipe updates
        from recipe.test import utils as recipe_test_utils
        recipe_1 = recipe_test_utils.create_recipe()
        recipe_test_utils.create_recipe_job(recipe=recipe_1, job=job_3)
        recipe_2 = recipe_test_utils.create_recipe()
        recipe_test_utils.create_recipe_job(recipe=recipe_2, job=job_4)

        when_ended = now()

        # Add jobs to message
        message = FailedJobs()
        message.ended = when_ended
        if message.can_fit_more():
            message.add_failed_job(
                FailedJob(job_1.id, job_1.num_exes, error_1.id))
        if message.can_fit_more():
            message.add_failed_job(
                FailedJob(job_2.id, job_2.num_exes, error_1.id))
        if message.can_fit_more():
            message.add_failed_job(
                FailedJob(job_3.id, job_3.num_exes, error_1.id))
        if message.can_fit_more():
            message.add_failed_job(
                FailedJob(job_4.id, job_4.num_exes,
                          error_2.id))  # Error that cannot be retried
        if message.can_fit_more():
            message.add_failed_job(
                FailedJob(job_5.id, job_5.num_exes - 1,
                          error_1.id))  # Mismatched exe_num
        if message.can_fit_more():
            message.add_failed_job(
                FailedJob(job_6.id, job_6.num_exes, error_1.id))
        if message.can_fit_more():
            message.add_failed_job(
                FailedJob(job_7.id, job_7.num_exes - 1, error_1.id))

        # Execute message
        result = message.execute()
        self.assertTrue(result)

        jobs = Job.objects.filter(id__in=job_ids).order_by('id')
        queued_jobs_msg = None
        update_recipes_msg = None
        self.assertEqual(len(message.new_messages), 2)
        for msg in message.new_messages:
            if msg.type == 'queued_jobs':
                queued_jobs_msg = msg
            elif msg.type == 'update_recipes':
                update_recipes_msg = msg
        self.assertEqual(len(queued_jobs_msg._queued_jobs),
                         2)  # 2 jobs should have been retried
        self.assertEqual(len(update_recipes_msg._recipe_ids),
                         2)  # 2 recipes need updating for the 2 failed jobs
        # Job 1 should be retried and put back on the queue
        self.assertEqual(jobs[0].status, 'QUEUED')
        self.assertEqual(jobs[0].num_exes, 1)
        self.assertEqual(queued_jobs_msg._queued_jobs[0].job_id, job_1.id)
        # Job 2 should be retried (status changes once the queued_jobs message runs)
        self.assertEqual(jobs[1].status, 'RUNNING')
        self.assertEqual(jobs[1].num_exes, 1)
        self.assertEqual(queued_jobs_msg._queued_jobs[1].job_id, job_2.id)
        # Job 3 should be failed since max_tries is used up
        self.assertEqual(jobs[2].status, 'FAILED')
        self.assertEqual(jobs[2].num_exes, 1)
        self.assertEqual(jobs[2].error_id, error_1.id)
        self.assertEqual(jobs[2].ended, when_ended)
        self.assertTrue(recipe_1.id in update_recipes_msg._recipe_ids)
        # Job 4 should be failed since error cannot be retried
        self.assertEqual(jobs[3].status, 'FAILED')
        self.assertEqual(jobs[3].num_exes, 1)
        self.assertEqual(jobs[3].error_id, error_2.id)
        self.assertEqual(jobs[3].ended, when_ended)
        self.assertTrue(recipe_2.id in update_recipes_msg._recipe_ids)
        # Job 5 should be ignored since mismatched exe_num
        self.assertEqual(jobs[4].status, 'RUNNING')
        self.assertEqual(jobs[4].num_exes, 1)
        # Job 6 should be ignored since it is already failed
        self.assertEqual(jobs[5].status, 'FAILED')
        self.assertEqual(jobs[5].num_exes, 1)
        # Job 7 should be ignored since it is canceled
        self.assertEqual(jobs[6].status, 'CANCELED')
        self.assertEqual(jobs[6].num_exes, 0)

        # Test executing message again
        message_json_dict = message.to_json()
        message = FailedJobs.from_json(message_json_dict)
        result = message.execute()
        self.assertTrue(result)

        jobs = Job.objects.filter(id__in=job_ids).order_by('id')
        self.assertEqual(len(message.new_messages), 1)
        queued_jobs_msg = message.new_messages[0]
        self.assertEqual(queued_jobs_msg.type, 'queued_jobs')
        # The same 2 jobs should have been retried
        self.assertEqual(len(queued_jobs_msg._queued_jobs), 2)

        # Job 1 should be retried and put back on the queue
        self.assertEqual(jobs[0].status, 'QUEUED')
        self.assertEqual(jobs[0].num_exes, 1)
        self.assertEqual(queued_jobs_msg._queued_jobs[0].job_id, job_1.id)
        # Job 2 should be retried (status changes once the queued_jobs message runs)
        self.assertEqual(jobs[1].status, 'RUNNING')
        self.assertEqual(jobs[1].num_exes, 1)
        self.assertEqual(queued_jobs_msg._queued_jobs[1].job_id, job_2.id)
        # Job 3 should be failed from first execution
        self.assertEqual(jobs[2].status, 'FAILED')
        self.assertEqual(jobs[2].num_exes, 1)
        self.assertEqual(jobs[2].error_id, error_1.id)
        # Job 4 should be failed from first execution
        self.assertEqual(jobs[3].status, 'FAILED')
        self.assertEqual(jobs[3].num_exes, 1)
        self.assertEqual(jobs[3].error_id, error_2.id)
        # Job 5 should be ignored since mismatched exe_num
        self.assertEqual(jobs[4].status, 'RUNNING')
        self.assertEqual(jobs[4].num_exes, 1)
        # Job 6 should be ignored since it is already failed
        self.assertEqual(jobs[5].status, 'FAILED')
        self.assertEqual(jobs[5].num_exes, 1)
        # Job 7 should be ignored since it is canceled
        self.assertEqual(jobs[6].status, 'CANCELED')
        self.assertEqual(jobs[6].num_exes, 0)
예제 #12
0
    def test_json(self):
        """Tests converting a FailedJobs message to and from JSON

        Covers a retryable failure, a failure that exhausts max_tries inside a
        recipe, and a job whose status should cause the update to be ignored.
        """

        error = error_test_utils.create_error(should_be_retried=True)

        data = JobData()
        job_1 = job_test_utils.create_job(num_exes=1,
                                          status='QUEUED',
                                          data=data.get_dict(),
                                          max_tries=2)
        job_2 = job_test_utils.create_job(num_exes=1,
                                          status='RUNNING',
                                          data=data.get_dict(),
                                          max_tries=1)
        job_3 = job_test_utils.create_job(num_exes=0, status='PENDING')
        job_ids = [job_1.id, job_2.id, job_3.id]

        # Job 2 belongs to a recipe, so failing it must trigger a recipe update
        from recipe.test import utils as recipe_test_utils
        recipe_1 = recipe_test_utils.create_recipe()
        recipe_test_utils.create_recipe_job(recipe=recipe_1, job=job_2)

        when_ended = now()

        # Add jobs to message
        message = FailedJobs()
        message.ended = when_ended
        if message.can_fit_more():
            message.add_failed_job(
                FailedJob(job_1.id, job_1.num_exes, error.id))
        if message.can_fit_more():
            message.add_failed_job(
                FailedJob(job_2.id, job_2.num_exes, error.id))
        if message.can_fit_more():
            message.add_failed_job(
                FailedJob(job_3.id, job_3.num_exes, error.id))

        # Convert message to JSON and back, and then execute
        message_json_dict = message.to_json()
        new_message = FailedJobs.from_json(message_json_dict)
        result = new_message.execute()

        self.assertTrue(result)
        jobs = Job.objects.filter(id__in=job_ids).order_by('id')
        queued_jobs_msg = None
        update_recipes_msg = None
        self.assertEqual(len(new_message.new_messages), 2)
        for msg in new_message.new_messages:
            if msg.type == 'queued_jobs':
                queued_jobs_msg = msg
            elif msg.type == 'update_recipes':
                update_recipes_msg = msg
        # Job 1 should be retried and put back on the queue
        self.assertEqual(jobs[0].status, 'QUEUED')
        self.assertEqual(jobs[0].num_exes, 1)
        self.assertEqual(len(queued_jobs_msg._queued_jobs), 1)
        self.assertEqual(queued_jobs_msg._queued_jobs[0].job_id, job_1.id)
        # Job 2 should be failed since max_tries is used up
        self.assertEqual(jobs[1].status, 'FAILED')
        self.assertEqual(jobs[1].num_exes, 1)
        self.assertEqual(jobs[1].error_id, error.id)
        self.assertEqual(jobs[1].ended, when_ended)
        self.assertEqual(len(update_recipes_msg._recipe_ids), 1)
        self.assertTrue(recipe_1.id in update_recipes_msg._recipe_ids)
        # Job 3 should ignore update
        self.assertEqual(jobs[2].status, 'PENDING')
        self.assertEqual(jobs[2].num_exes, 0)
예제 #13
0
    def test_execute(self):
        """Tests calling QueuedJobs.execute() successfully

        Builds jobs in every status, sends a re-queue message covering all of
        them, and verifies that only eligible jobs (previously queued, not
        finished, matching exe_num, with input data) end up QUEUED. Executing
        the same message a second time must produce identical results.
        """

        data = JobData()
        job_1 = job_test_utils.create_job(num_exes=0,
                                          status='PENDING',
                                          input=data.get_dict())
        job_2 = job_test_utils.create_job(num_exes=1,
                                          status='FAILED',
                                          input=data.get_dict())
        job_3 = job_test_utils.create_job(num_exes=1,
                                          status='RUNNING',
                                          input=data.get_dict())
        job_4 = job_test_utils.create_job(num_exes=1,
                                          status='CANCELED',
                                          input=data.get_dict())
        job_5 = job_test_utils.create_job(num_exes=1,
                                          status='QUEUED',
                                          input=data.get_dict())
        job_6 = job_test_utils.create_job(num_exes=1,
                                          status='COMPLETED',
                                          input=data.get_dict())
        job_7 = job_test_utils.create_job(num_exes=1,
                                          status='RUNNING',
                                          input=data.get_dict())
        # Job 8 has no input data and has never been queued
        job_8 = job_test_utils.create_job(num_exes=0, status='CANCELED')
        job_ids = [job.id for job in (job_1, job_2, job_3, job_4, job_5,
                                      job_6, job_7, job_8)]

        # Add jobs to message
        message = QueuedJobs()
        message.priority = 101
        message.requeue = True  # The message is re-queuing so only jobs that have been queued before may be re-queued
        jobs_to_add = [
            (job_1.id, job_1.num_exes),
            (job_2.id, job_2.num_exes),
            (job_3.id, job_3.num_exes),
            (job_4.id, job_4.num_exes),
            (job_5.id, job_5.num_exes),
            (job_6.id, job_6.num_exes),
            (job_7.id, job_7.num_exes - 1),  # Mismatched exe_num
            (job_8.id, job_8.num_exes),
        ]
        for job_id, exe_num in jobs_to_add:
            if message.can_fit_more():
                message.add_job(job_id, exe_num)

        # Expected (status, num_exes) per job (ordered by id) after execution
        expected = [
            ('PENDING', 0),    # Job 1: never queued, so re-queue skips it
            ('QUEUED', 2),     # Job 2: successfully queued
            ('QUEUED', 2),     # Job 3: successfully queued
            ('QUEUED', 2),     # Job 4: successfully queued
            ('QUEUED', 2),     # Job 5: successfully queued
            ('COMPLETED', 1),  # Job 6: already completed, not queued
            ('RUNNING', 1),    # Job 7: mismatched exe_num (old message), not queued
            ('CANCELED', 0),   # Job 8: no input data and never queued, not queued
        ]

        def check_jobs():
            # Shared verification so the post-re-execution check cannot drift
            # from the first check
            jobs = Job.objects.filter(id__in=job_ids).order_by('id')
            for job, (status, num_exes) in zip(jobs, expected):
                self.assertEqual(job.status, status)
                self.assertEqual(job.num_exes, num_exes)

        # Execute message
        result = message.execute()
        self.assertTrue(result)
        check_jobs()
        # Ensure priority is correctly set
        queue = Queue.objects.get(job_id=job_2.id)
        self.assertEqual(queue.priority, 101)

        # Test executing message again (round-tripped through JSON); all
        # results should be the same. Note: the original test asserted the
        # same result twice in a row here - the duplicate was removed.
        message_json_dict = message.to_json()
        message = QueuedJobs.from_json(message_json_dict)
        result = message.execute()
        self.assertTrue(result)
        check_jobs()
예제 #14
0
    def test_execute(self):
        """Tests calling RequeueJobs.execute() successfully

        Only job 1 should actually be re-queued (its max_tries grows by the
        job type's max_tries); job 5 should instead be routed to an
        uncancel_jobs message. Executing the message twice must yield the
        same results.
        """

        data = JobData()
        job_type = job_test_utils.create_seed_job_type(max_tries=3)
        job_1 = job_test_utils.create_job(job_type=job_type, num_exes=3,
                                          status='FAILED',
                                          input=data.get_dict())
        job_2 = job_test_utils.create_job(job_type=job_type, num_exes=3,
                                          status='FAILED',
                                          input=data.get_dict())
        job_3 = job_test_utils.create_job(job_type=job_type, num_exes=1,
                                          status='COMPLETED',
                                          input=data.get_dict())
        job_4 = job_test_utils.create_job(job_type=job_type, num_exes=0,
                                          status='PENDING',
                                          input=data.get_dict())
        job_5 = job_test_utils.create_job(job_type=job_type, num_exes=0,
                                          status='CANCELED')
        job_ids = [job_1.id, job_2.id, job_3.id, job_4.id, job_5.id]

        # Add jobs to message
        message = RequeueJobs()
        message.priority = 101
        jobs_to_add = [
            (job_1.id, job_1.num_exes),
            (job_2.id, job_2.num_exes - 1),  # Mismatched exe_num
            (job_3.id, job_3.num_exes),
            (job_4.id, job_4.num_exes),
            (job_5.id, job_5.num_exes),
        ]
        for job_id, exe_num in jobs_to_add:
            if message.can_fit_more():
                message.add_job(job_id, exe_num)

        def check_results():
            # Shared verification for both executions of the message
            jobs = Job.objects.filter(id__in=job_ids).order_by('id')
            # Job 1 should have been good (max_tries increased)
            self.assertEqual(jobs[0].max_tries, 6)
            # Job 2 had mismatched exe_num
            self.assertEqual(jobs[1].max_tries, 3)
            # Job 3 was already COMPLETED
            self.assertEqual(jobs[2].max_tries, 3)
            # Job 4 can't be re-queued since it had never been queued yet
            self.assertEqual(jobs[3].max_tries, 3)
            # Job 5 can't be re-queued since it had never been queued yet
            self.assertEqual(jobs[4].max_tries, 3)
            # Job 1 is only job that should be included in message to be queued
            self.assertEqual(len(message.new_messages), 2)
            queued_jobs_msg = message.new_messages[0]
            self.assertEqual(queued_jobs_msg.type, 'queued_jobs')
            self.assertListEqual(queued_jobs_msg._queued_jobs,
                                 [QueuedJob(job_1.id, job_1.num_exes)])
            self.assertEqual(queued_jobs_msg.priority, 101)
            self.assertTrue(queued_jobs_msg.requeue)
            # Job 5 is only job that should be included in message to uncancel
            uncancel_jobs_msg = message.new_messages[1]
            self.assertEqual(uncancel_jobs_msg.type, 'uncancel_jobs')
            self.assertListEqual(uncancel_jobs_msg._job_ids, [job_5.id])

        # Execute message
        result = message.execute()
        self.assertTrue(result)
        check_results()

        # Test executing message again; all results should be the same
        message.new_messages = []
        result = message.execute()
        self.assertTrue(result)
        check_results()
예제 #15
0
    def test_execute(self):
        """Tests calling CancelJobs.execute() successfully

        Jobs 1 and 4 should be canceled at the message's timestamp, while
        already-canceled and completed jobs are left alone. Cancellation
        should also spawn update_recipe and update_recipe_metrics messages,
        and re-executing the message must give identical results.
        """

        from recipe.test import utils as recipe_test_utils
        from recipe.diff.forced_nodes import ForcedNodes
        from recipe.diff.json.forced_nodes_v6 import convert_forced_nodes_to_v6

        when = now()
        data = JobData()
        recipe = recipe_test_utils.create_recipe()
        job_type = job_test_utils.create_seed_job_type()
        job_1 = job_test_utils.create_job(job_type=job_type, num_exes=3,
                                          status='FAILED',
                                          input=data.get_dict(),
                                          recipe=recipe)
        job_2 = job_test_utils.create_job(job_type=job_type, num_exes=3,
                                          status='CANCELED',
                                          input=data.get_dict(),
                                          recipe=recipe)
        job_3 = job_test_utils.create_job(job_type=job_type, num_exes=1,
                                          status='COMPLETED',
                                          input=data.get_dict(),
                                          recipe=recipe)
        job_4 = job_test_utils.create_job(job_type=job_type, num_exes=0,
                                          status='PENDING',
                                          recipe=recipe)
        all_jobs = [job_1, job_2, job_3, job_4]
        job_ids = [job.id for job in all_jobs]
        for job_name, job in [('job_1', job_1), ('job_2', job_2),
                              ('job_3', job_3), ('job_4', job_4)]:
            recipe_test_utils.create_recipe_job(recipe=recipe,
                                                job_name=job_name,
                                                job=job)

        # Add jobs to message
        message = CancelJobs()
        message.when = when
        for job in all_jobs:
            if message.can_fit_more():
                message.add_job(job.id)

        # Expected forced-nodes payload on the spawned update_recipe message
        forced_nodes = ForcedNodes()
        forced_nodes.set_all_nodes()
        forced_nodes_dict = convert_forced_nodes_to_v6(forced_nodes).get_dict()

        def check_results():
            # Shared verification for both executions of the message
            jobs = Job.objects.filter(id__in=job_ids).order_by('id')
            # Job 1 should have been canceled
            self.assertEqual(jobs[0].status, 'CANCELED')
            self.assertEqual(jobs[0].last_status_change, when)
            # Job 2 was already canceled
            self.assertEqual(jobs[1].status, 'CANCELED')
            self.assertNotEqual(jobs[1].last_status_change, when)
            # Job 3 was already COMPLETED, so can't be canceled
            self.assertEqual(jobs[2].status, 'COMPLETED')
            self.assertNotEqual(jobs[2].last_status_change, when)
            # Job 4 should have been canceled
            self.assertEqual(jobs[3].status, 'CANCELED')
            self.assertEqual(jobs[3].last_status_change, when)
            # Should be messages to update recipe and update recipe metrics
            # after canceling jobs
            self.assertEqual(len(message.new_messages), 2)
            update_recipe_msg = None
            update_recipe_metrics_msg = None
            for msg in message.new_messages:
                if msg.type == 'update_recipe':
                    update_recipe_msg = msg
                elif msg.type == 'update_recipe_metrics':
                    update_recipe_metrics_msg = msg
            self.assertIsNotNone(update_recipe_msg)
            self.assertIsNotNone(update_recipe_metrics_msg)
            self.assertEqual(update_recipe_msg.root_recipe_id, recipe.id)
            self.assertDictEqual(
                convert_forced_nodes_to_v6(
                    update_recipe_msg.forced_nodes).get_dict(),
                forced_nodes_dict)
            self.assertListEqual(update_recipe_metrics_msg._recipe_ids,
                                 [recipe.id])

        # Execute message
        result = message.execute()
        self.assertTrue(result)
        check_results()

        # Test executing message again; all results should be the same
        message.new_messages = []
        result = message.execute()
        self.assertTrue(result)
        check_results()
예제 #16
0
    def test_execute(self):
        """Tests calling RequeueJobsBulk.execute() successfully

        With the batch size patched to 5, a single execution should emit a
        follow-up requeue_jobs_bulk message plus one requeue_jobs message
        carrying the four eligible FAILED jobs. Re-executing must emit the
        same pair of messages.
        """

        # Importing module here to patch the max batch size
        import queue.messages.requeue_jobs_bulk
        queue.messages.requeue_jobs_bulk.MAX_BATCH_SIZE = 5

        sys_err = error_test_utils.create_error(category='SYSTEM')

        data = JobData()
        job_type = job_test_utils.create_job_type()
        job_1 = job_test_utils.create_job(job_type=job_type, num_exes=3,
                                          status='FAILED', error=sys_err,
                                          input=data.get_dict())
        job_2 = job_test_utils.create_job(job_type=job_type, num_exes=3,
                                          status='FAILED', error=sys_err,
                                          input=data.get_dict())
        # Job 3 has never been queued (no input, num_exes=0)
        job_3 = job_test_utils.create_job(job_type=job_type, num_exes=0,
                                          status='FAILED', error=sys_err)
        job_4 = job_test_utils.create_job(job_type=job_type, num_exes=3,
                                          status='FAILED', error=sys_err,
                                          input=data.get_dict())
        # Job 5 does not match the message's FAILED status filter
        job_5 = job_test_utils.create_job(job_type=job_type, num_exes=3,
                                          status='CANCELED', error=sys_err,
                                          input=data.get_dict())
        job_6 = job_test_utils.create_job(job_type=job_type, num_exes=3,
                                          status='FAILED', error=sys_err,
                                          input=data.get_dict())
        job_7 = job_test_utils.create_job(job_type=job_type, num_exes=3,
                                          status='FAILED', error=sys_err,
                                          input=data.get_dict())

        # Create message
        message = queue.messages.requeue_jobs_bulk.RequeueJobsBulk()
        message.error_ids = [sys_err.id]
        message.job_type_ids = [job_type.id]
        message.priority = 10001
        message.status = 'FAILED'

        def check_messages():
            # Should be two messages, one for next bulk re-queue and one for
            # re-queuing the specific jobs
            self.assertEqual(len(message.new_messages), 2)
            requeue_bulk_message = message.new_messages[0]
            requeue_message = message.new_messages[1]
            self.assertEqual(requeue_bulk_message.type, 'requeue_jobs_bulk')
            self.assertEqual(requeue_bulk_message.current_job_id, job_2.id)
            self.assertEqual(requeue_message.type, 'requeue_jobs')
            # Job 5 is skipped due to CANCELED and job 3 has not been queued
            # yet (forced illegal state)
            self.assertListEqual(requeue_message._requeue_jobs, [
                QueuedJob(job_7.id, job_7.num_exes),
                QueuedJob(job_6.id, job_6.num_exes),
                QueuedJob(job_4.id, job_4.num_exes),
                QueuedJob(job_2.id, job_2.num_exes)
            ])
            self.assertEqual(requeue_message.priority, 10001)

        # Execute message
        result = message.execute()
        self.assertTrue(result)
        check_messages()

        # Test executing message again; should have same messages returned
        message.new_messages = []
        result = message.execute()
        self.assertTrue(result)
        check_messages()