Example #1
    def start_task(self, task, rampart_group, dependent_tasks=None):
        from awx.main.tasks import handle_work_error, handle_work_success

        # Avoid a shared mutable default argument; fall back to an empty list.
        dependent_tasks = dependent_tasks or []

        task_actual = {
            'type': get_type_for_model(type(task)),
            'id': task.id,
        }
        dependencies = [{
            'type': get_type_for_model(type(t)),
            'id': t.id
        } for t in dependent_tasks]

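        # .s() builds Celery signatures; they are attached to the dispatch in
        # post_commit() below as the error and success callbacks.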
        error_handler = handle_work_error.s(subtasks=[task_actual] +
                                            dependencies)
        success_handler = handle_work_success.s(task_actual=task_actual)

        task.status = 'waiting'

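        # pre_start() runs the pre-start check and returns (ok, opts); opts is
        # later handed to start_celery_task() in post_commit().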
        (start_status, opts) = task.pre_start()
        if not start_status:
            task.status = 'failed'
            if task.job_explanation:
                task.job_explanation += ' '
            task.job_explanation += 'Task failed pre-start check.'
            task.save()
            # TODO: run error handler to fail sub-tasks and send notifications
        else:
            if type(task) is WorkflowJob:
                task.status = 'running'
            if not task.supports_isolation() and rampart_group.controller_id:
                # non-Ansible jobs on isolated instances run on controller
                task.instance_group = rampart_group.controller
                logger.info('Submitting isolated %s to queue %s via %s.',
                            task.log_format, task.instance_group_id,
                            rampart_group.controller_id)
            else:
                task.instance_group = rampart_group
                logger.info('Submitting %s to instance group %s.',
                            task.log_format, task.instance_group_id)
            with disable_activity_stream():
                task.celery_task_id = str(uuid.uuid4())
                task.save()

            self.consume_capacity(task, rampart_group.name)

        def post_commit():
            task.websocket_emit_status(task.status)
            if task.status != 'failed':
                task.start_celery_task(opts,
                                       error_callback=error_handler,
                                       success_callback=success_handler,
                                       queue=rampart_group.name)

        connection.on_commit(post_commit)
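
Both examples defer the websocket emit and the Celery dispatch with connection.on_commit, so the work is only queued once the task row (status, celery_task_id, instance group) has actually been committed. A minimal, self-contained sketch of that pattern; FakeConnection below is a hypothetical stand-in for django.db.connection, not AWX code:

    # Minimal sketch of the "dispatch only after commit" pattern (not AWX code).
    class FakeConnection:
        def __init__(self):
            self._callbacks = []

        def on_commit(self, func):
            # Remember the callback; Django's connection runs it when the
            # surrounding transaction commits (and drops it on rollback).
            self._callbacks.append(func)

        def commit(self):
            callbacks, self._callbacks = self._callbacks, []
            for func in callbacks:
                func()

    connection = FakeConnection()
    task = {'status': 'waiting', 'celery_task_id': 'some-uuid'}  # saved inside the transaction

    def post_commit():
        # Runs only once the task row is visible to other processes, so the
        # worker that picks it up can always load it from the database.
        print('dispatching', task['celery_task_id'], task['status'])

    connection.on_commit(post_commit)
    # ... more work inside the transaction; nothing has been dispatched yet ...
    connection.commit()  # the queued callback fires here
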
Example #2
    def start_task(self,
                   task,
                   rampart_group,
                   dependent_tasks=None,
                   instance=None):
        from awx.main.tasks import handle_work_error, handle_work_success

        dependent_tasks = dependent_tasks or []

        task_actual = {
            'type': get_type_for_model(type(task)),
            'id': task.id,
        }
        dependencies = [{
            'type': get_type_for_model(type(t)),
            'id': t.id
        } for t in dependent_tasks]

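        # Isolated jobs need an online controller node; if the group has none
        # available, give up for now and try again on a later scheduling pass.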
        controller_node = None
        if task.supports_isolation() and rampart_group.controller_id:
            try:
                controller_node = rampart_group.choose_online_controller_node()
            except IndexError:
                logger.debug(
                    six.text_type(
                        "No controllers available in group {} to run {}").
                    format(rampart_group.name, task.log_format))
                return

        error_handler = handle_work_error.s(subtasks=[task_actual] +
                                            dependencies)
        success_handler = handle_work_success.s(task_actual=task_actual)

        task.status = 'waiting'

        (start_status, opts) = task.pre_start()
        if not start_status:
            task.status = 'failed'
            if task.job_explanation:
                task.job_explanation += ' '
            task.job_explanation += 'Task failed pre-start check.'
            task.save()
            # TODO: run error handler to fail sub-tasks and send notifications
        else:
            if type(task) is WorkflowJob:
                task.status = 'running'
                logger.info('Transitioning %s to running status.',
                            task.log_format)
            elif not task.supports_isolation() and rampart_group.controller_id:
                # non-Ansible jobs on isolated instances run on controller
                task.instance_group = rampart_group.controller
                task.execution_node = random.choice(
                    list(rampart_group.controller.instances.all().values_list(
                        'hostname', flat=True)))
                logger.info(
                    six.text_type(
                        'Submitting isolated {} to queue {} on node {}.').format(
                            task.log_format, task.instance_group.name,
                            task.execution_node))
            elif controller_node:
                task.instance_group = rampart_group
                task.execution_node = instance.hostname
                task.controller_node = controller_node
                logger.info(
                    six.text_type(
                        'Submitting isolated {} to queue {} controlled by {}.'
                    ).format(task.log_format, task.execution_node,
                             controller_node))
            else:
                task.instance_group = rampart_group
                if instance is not None:
                    task.execution_node = instance.hostname
                logger.info(
                    six.text_type(
                        'Submitting {} to <instance group, instance> <{},{}>.'
                    ).format(task.log_format, task.instance_group_id,
                             task.execution_node))
            with disable_activity_stream():
                task.celery_task_id = str(uuid.uuid4())
                task.save()

            if rampart_group is not None:
                self.consume_capacity(task, rampart_group.name)

        def post_commit():
            task.websocket_emit_status(task.status)
            if task.status != 'failed':
                task.start_celery_task(opts,
                                       error_callback=error_handler,
                                       success_callback=success_handler,
                                       queue=task.get_celery_queue_name())

        connection.on_commit(post_commit)
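
The second example routes a task to one of four destinations depending on the task type, whether it supports isolation, and whether the instance group has a controller. A hedged summary of that branching as a stand-alone function; the return strings are only illustrative labels, not AWX values:

    # Illustrative summary of the routing branches in Example #2 (not AWX code).
    def pick_route(is_workflow_job, supports_isolation, has_controller, controller_node):
        """Return a label for where start_task would place the task."""
        if is_workflow_job:
            # Workflow jobs never need an execution node; they just move to 'running'.
            return 'workflow: mark running'
        if not supports_isolation and has_controller:
            # Non-Ansible work aimed at an isolated group runs on the controller group.
            return 'run on controller group'
        if controller_node:
            # Isolated Ansible work runs on the isolated instance, managed by a controller node.
            return 'run on isolated instance via controller node'
        # Ordinary case: run directly on the chosen instance group.
        return 'run on instance group'

    assert pick_route(True, False, False, None) == 'workflow: mark running'
    assert pick_route(False, False, True, None) == 'run on controller group'
    assert pick_route(False, True, True, 'ctrl-node-1') == 'run on isolated instance via controller node'
    assert pick_route(False, True, False, None) == 'run on instance group'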