Code Example #1
File: filters.py  Project: kedar700/awx-bitbucket
    def filter_queryset(self, request, queryset, view):
        try:
            types = None
            for key, value in request.query_params.items():
                if key == 'type':
                    if ',' in value:
                        types = value.split(',')
                    else:
                        types = (value,)
            if types:
                # Build a map of API type name -> ContentType pk for all
                # candidate models.
                types_map = {}
                for ct in ContentType.objects.filter(Q(app_label='main') | Q(app_label='auth', model='user')):
                    ct_model = ct.model_class()
                    if not ct_model:
                        continue
                    ct_type = get_type_for_model(ct_model)
                    types_map[ct_type] = ct.pk
                model = queryset.model
                model_type = get_type_for_model(model)
                if 'polymorphic_ctype' in get_all_field_names(model):
                    types_pks = set(v for k, v in types_map.items() if k in types)
                    queryset = queryset.filter(polymorphic_ctype_id__in=types_pks)
                elif model_type in types:
                    queryset = queryset  # model type matches; no filtering needed
                else:
                    queryset = queryset.none()
            return queryset
        except FieldError as e:
            # Return a 400 for invalid field names.
            raise ParseError(*e.args)
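
Every example on this page calls get_type_for_model. The sketch below shows what that helper is assumed to do in AWX-style code: map a model class to the underscored type string used in the API. The real implementation reads the name via model._meta; plain __name__ is used here so the sketch runs without Django.

import re

def get_type_for_model(model):
    # Assumed behavior: CamelCase class name -> underscored type string,
    # e.g. WorkflowJob -> 'workflow_job'.
    return re.sub(r'(?<!^)(?=[A-Z])', '_', model.__name__).lower()

class WorkflowJob:  # stand-in for the Django model
    pass

print(get_type_for_model(WorkflowJob))  # -> workflow_job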
Code Example #2
    def start_task(self, task, instance_group, dependent_tasks=None, instance=None):
        self.subsystem_metrics.inc("task_manager_tasks_started", 1)
        self.start_task_limit -= 1
        if self.start_task_limit == 0:
            # schedule another run immediately after this task manager
            schedule_task_manager()
        from awx.main.tasks.system import handle_work_error, handle_work_success

        dependent_tasks = dependent_tasks or []

        task_actual = {
            'type': get_type_for_model(type(task)),
            'id': task.id,
        }
        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]

        task.status = 'waiting'

        (start_status, opts) = task.pre_start()
        if not start_status:
            task.status = 'failed'
            if task.job_explanation:
                task.job_explanation += ' '
            task.job_explanation += 'Task failed pre-start check.'
            task.save()
            # TODO: run error handler to fail sub-tasks and send notifications
        else:
            if type(task) is WorkflowJob:
                task.status = 'running'
                task.send_notification_templates('running')
                logger.debug('Transitioning %s to running status.', task.log_format)
                schedule_task_manager()
            # at this point we already have control/execution nodes selected for the following cases
            else:
                task.instance_group = instance_group
                execution_node_msg = f' and execution node {task.execution_node}' if task.execution_node else ''
                logger.debug(
                    f'Submitting job {task.log_format} controlled by {task.controller_node} to instance group {instance_group.name}{execution_node_msg}.'
                )
            with disable_activity_stream():
                task.celery_task_id = str(uuid.uuid4())
                task.save()
                task.log_lifecycle("waiting")

        def post_commit():
            if task.status != 'failed' and type(task) is not WorkflowJob:
                # Before task is dispatched, ensure that job_event partitions exist
                create_partition(task.event_class._meta.db_table, start=task.created)
                task_cls = task._get_task_class()
                task_cls.apply_async(
                    [task.pk],
                    opts,
                    queue=task.get_queue_name(),
                    uuid=task.celery_task_id,
                    callbacks=[{'task': handle_work_success.name, 'kwargs': {'task_actual': task_actual}}],
                    errbacks=[{'task': handle_work_error.name, 'args': [task.celery_task_id], 'kwargs': {'subtasks': [task_actual] + dependencies}}],
                )

        task.websocket_emit_status(task.status)  # adds to on_commit
        connection.on_commit(post_commit)
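
The post_commit closure defers the actual dispatch until the database transaction commits, so a worker can never pick up a job whose row is not yet visible. A minimal sketch of the same pattern using Django's documented transaction.on_commit (connection.on_commit, used above, is the lower-level equivalent); dispatch_to_worker is a hypothetical stand-in for the apply_async call:

from django.db import transaction

def start(task):
    with transaction.atomic():
        task.status = 'waiting'
        task.save()

        def post_commit():
            # Runs only after COMMIT; on rollback the callback is discarded.
            dispatch_to_worker(task.pk)  # hypothetical dispatch helper

        transaction.on_commit(post_commit)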
Code Example #3
    def start_task(self, task, rampart_group, dependent_tasks=None):
        from awx.main.tasks import handle_work_error, handle_work_success

        # Avoid a mutable default argument; normalize to an empty list.
        dependent_tasks = dependent_tasks or []

        task_actual = {
            'type': get_type_for_model(type(task)),
            'id': task.id,
        }
        dependencies = [{
            'type': get_type_for_model(type(t)),
            'id': t.id
        } for t in dependent_tasks]

        error_handler = handle_work_error.s(subtasks=[task_actual] +
                                            dependencies)
        success_handler = handle_work_success.s(task_actual=task_actual)

        task.status = 'waiting'

        (start_status, opts) = task.pre_start()
        if not start_status:
            task.status = 'failed'
            if task.job_explanation:
                task.job_explanation += ' '
            task.job_explanation += 'Task failed pre-start check.'
            task.save()
            # TODO: run error handler to fail sub-tasks and send notifications
        else:
            if type(task) is WorkflowJob:
                task.status = 'running'
            if not task.supports_isolation() and rampart_group.controller_id:
                # non-Ansible jobs on isolated instances run on controller
                task.instance_group = rampart_group.controller
                logger.info('Submitting isolated %s to queue %s via %s.',
                            task.log_format, task.instance_group_id,
                            rampart_group.controller_id)
            else:
                task.instance_group = rampart_group
                logger.info('Submitting %s to instance group %s.',
                            task.log_format, task.instance_group_id)
            with disable_activity_stream():
                task.celery_task_id = str(uuid.uuid4())
                task.save()

            self.consume_capacity(task, rampart_group.name)

        def post_commit():
            task.websocket_emit_status(task.status)
            if task.status != 'failed':
                task.start_celery_task(opts,
                                       error_callback=error_handler,
                                       success_callback=success_handler,
                                       queue=rampart_group.name)

        connection.on_commit(post_commit)
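
handle_work_error.s(...) and handle_work_success.s(...) above build Celery signatures: task invocations with their arguments bound now and executed later as callbacks. A generic sketch of that pattern (the task body is illustrative):

from celery import shared_task

@shared_task
def handle_work_success(task_actual=None):
    print('finished:', task_actual)

# .s() binds the kwargs now; the signature can be passed around, attached
# to another task via link=/link_error=, or invoked later.
sig = handle_work_success.s(task_actual={'type': 'job', 'id': 1})
sig.delay()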
Code Example #4
    def job_blocked_by(self, task):
        # TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
        #       in the old task manager this was handled as a method on each task object outside of the graph and
        #       probably has the side effect of cutting down *a lot* of the logic from this task manager class
        blocked_by = self.dependency_graph.task_blocked_by(task)
        if blocked_by:
            return blocked_by

        for dep in task.dependent_jobs.all():
            if dep.status in ACTIVE_STATES:
                return dep
            # if we detect a failed or error dependency, go ahead and fail this
            # task. The errback on the dependency takes some time to trigger,
            # and we don't want the task to enter running state if its
            # dependency has failed or errored.
            elif dep.status in ("error", "failed"):
                task.status = 'failed'
                task.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
                    get_type_for_model(type(dep)),
                    dep.name,
                    dep.id,
                )
                task.save(update_fields=['status', 'job_explanation'])
                task.websocket_emit_status('failed')
                return dep

        return None
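
For a failed project update dependency, the job_explanation built above comes out as a JSON-shaped string; with illustrative values ('project_update' being the get_type_for_model result for a ProjectUpdate):

explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
    'project_update', 'Example Project', '42',  # illustrative values
)
# -> Previous Task Failed: {"job_type": "project_update", "job_name": "Example Project", "job_id": "42"}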
Code Example #5
    def log_format(self):
        return '{} {} ({})'.format(get_type_for_model(type(self)), self.id, self.status)
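
For a Job with id 123 in state running, this renders as follows (type string per the get_type_for_model sketch above):

print('{} {} ({})'.format('job', 123, 'running'))  # -> job 123 (running)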
Code Example #6
    def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
        self.start_task_limit -= 1
        if self.start_task_limit == 0:
            # schedule another run immediately after this task manager
            schedule_task_manager()
        from awx.main.tasks import handle_work_error, handle_work_success

        dependent_tasks = dependent_tasks or []

        task_actual = {
            'type': get_type_for_model(type(task)),
            'id': task.id,
        }
        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]

        task.status = 'waiting'

        (start_status, opts) = task.pre_start()
        if not start_status:
            task.status = 'failed'
            if task.job_explanation:
                task.job_explanation += ' '
            task.job_explanation += 'Task failed pre-start check.'
            task.save()
            # TODO: run error handler to fail sub-tasks and send notifications
        else:
            if type(task) is WorkflowJob:
                task.status = 'running'
                task.send_notification_templates('running')
                logger.debug('Transitioning %s to running status.', task.log_format)
                schedule_task_manager()
            elif rampart_group.is_container_group:
                task.instance_group = rampart_group
                if task.capacity_type == 'execution':
                    # find one real, non-containerized instance with capacity to
                    # act as the controller for k8s API interaction
                    try:
                        task.controller_node = Instance.choose_online_control_plane_node()
                        task.log_lifecycle("controller_node_chosen")
                    except IndexError:
                        logger.warning("No control plane nodes available to run containerized job {}".format(task.log_format))
                        return
                else:
                    # project updates and system jobs don't *actually* run in pods, so
                    # just pick *any* non-containerized host and use it as the execution node
                    task.execution_node = Instance.choose_online_control_plane_node()
                    task.log_lifecycle("execution_node_chosen")
                    logger.debug('Submitting containerized {} to queue {}.'.format(task.log_format, task.execution_node))
            else:
                task.instance_group = rampart_group
                task.execution_node = instance.hostname
                task.log_lifecycle("execution_node_chosen")
                if instance.node_type == 'execution':
                    try:
                        task.controller_node = Instance.choose_online_control_plane_node()
                        task.log_lifecycle("controller_node_chosen")
                    except IndexError:
                        logger.warning("No control plane nodes available to manage {}".format(task.log_format))
                        return
                else:
                    # control plane nodes will manage jobs locally for performance and resilience
                    task.controller_node = task.execution_node
                    task.log_lifecycle("controller_node_chosen")
                logger.debug('Submitting job {} to queue {} controlled by {}.'.format(task.log_format, task.execution_node, task.controller_node))
            with disable_activity_stream():
                task.celery_task_id = str(uuid.uuid4())
                task.save()
                task.log_lifecycle("waiting")

            if rampart_group is not None:
                self.consume_capacity(task, rampart_group.name, instance=instance)

        def post_commit():
            if task.status != 'failed' and type(task) is not WorkflowJob:
                # Before task is dispatched, ensure that job_event partitions exist
                create_partition(task.event_class._meta.db_table, start=task.created)
                task_cls = task._get_task_class()
                task_cls.apply_async(
                    [task.pk],
                    opts,
                    queue=task.get_queue_name(),
                    uuid=task.celery_task_id,
                    callbacks=[{'task': handle_work_success.name, 'kwargs': {'task_actual': task_actual}}],
                    errbacks=[{'task': handle_work_error.name, 'args': [task.celery_task_id], 'kwargs': {'subtasks': [task_actual] + dependencies}}],
                )

        task.websocket_emit_status(task.status)  # adds to on_commit
        connection.on_commit(post_commit)
Code Example #7
    def get_summary_fields(self, obj):
        # code here to calculate the result
        # or return obj.calc_result() if you have that calculation in the model
        # datacenter_name = serializers.ReadOnlyField(source='datacenter.name',)
        # return "some result"

        summary_fields = OrderedDict()

        # fk = 'datacenter'
        # fields = ['id', 'name']
        
        # summary_fields[fk] = OrderedDict()
        
        # fkval = getattr(obj, fk, None)

        # for field in fields:
        #     fval = getattr(fkval, field, None)
        #     summary_fields[fk][field] = fval

        all_field_names = list(set(self.fields.keys()) - set(EXCLUDE_SUMMARY_FIELDS))
        summary_field_keys = list(SUMMARIZABLE_FK_FIELDS.keys())

        # summary_fields['all_fields'] =  all_field_names
        # summary_fields['summary_field_keys'] =  summary_field_keys
        # summary_fields['diff'] =  list(set(all_field_names) & set(summary_field_keys))

        field_names = list(set(all_field_names) & set(summary_field_keys))

        for fk in field_names:
            summary_fields[fk] = OrderedDict()
            fields = SUMMARIZABLE_FK_FIELDS[fk]
            fkval = getattr(obj, fk, None)

            for field in fields:
                fval = getattr(fkval, field, None)
                summary_fields[fk][field] = fval

        # Status
        fk = 'status'
        if fk in all_field_names:
            # fk = 'status'
            ftype_model = get_type_for_model(self.Meta.model)

            if ftype_model == 'prefix':
                STATUS_CHOICES = copy.deepcopy(STATUS_PREFIX_SUMMARY)
            elif ftype_model == 'ip_address':
                STATUS_CHOICES = copy.deepcopy(STATUS_IPADDRESS_SUMMARY)
            else:
                STATUS_CHOICES = copy.deepcopy(STATUS_PREFIX_SUMMARY)

            summary_fields[fk] = OrderedDict()
            fkval = getattr(obj, fk, None)

            fval = STATUS_CHOICES[fkval]

            fields = OrderedDict()
            fields['id'] = fkval
            fields['name'] = fval
            fields['type'] = ftype_model
            fields['debug'] = fkval  # STATUS_CHOICES[fkval]

            summary_fields[fk] = fields

        return summary_fields
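
The returned structure is a nested mapping: one entry per summarizable foreign key plus the status block. An illustrative result (field names, ids, and labels are assumptions; the real label comes from STATUS_PREFIX_SUMMARY or STATUS_IPADDRESS_SUMMARY):

summary_fields = {
    'datacenter': {'id': 7, 'name': 'dc-east'},  # illustrative FK summary
    'status': {'id': 1, 'name': 'Active', 'type': 'prefix', 'debug': 1},
}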
Code Example #8
 def get_type(self, obj):
     return get_type_for_model(self.Meta.model)
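
In Django REST Framework this method is typically wired up through a SerializerMethodField, which resolves the field by calling get_<field>(obj) on the serializer. A minimal sketch with placeholder names (ExampleSerializer and Example are illustrative):

from rest_framework import serializers

class ExampleSerializer(serializers.ModelSerializer):
    # DRF resolves this field by calling get_type(obj) below.
    type = serializers.SerializerMethodField()

    class Meta:
        model = Example  # placeholder model
        fields = ('id', 'type')

    def get_type(self, obj):
        return get_type_for_model(self.Meta.model)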
Code Example #9
File: task_manager.py  Project: sumit-21/awx
    def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
        from awx.main.tasks import handle_work_error, handle_work_success

        dependent_tasks = dependent_tasks or []

        task_actual = {
            'type': get_type_for_model(type(task)),
            'id': task.id,
        }
        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]

        controller_node = None
        if task.supports_isolation() and rampart_group.controller_id:
            try:
                controller_node = rampart_group.choose_online_controller_node()
            except IndexError:
                logger.debug("No controllers available in group {} to run {}".format(
                             rampart_group.name, task.log_format))
                return

        task.status = 'waiting'

        (start_status, opts) = task.pre_start()
        if not start_status:
            task.status = 'failed'
            if task.job_explanation:
                task.job_explanation += ' '
            task.job_explanation += 'Task failed pre-start check.'
            task.save()
            # TODO: run error handler to fail sub-tasks and send notifications
        else:
            if type(task) is WorkflowJob:
                task.status = 'running'
                task.send_notification_templates('running')
                logger.debug('Transitioning %s to running status.', task.log_format)
                schedule_task_manager()
            elif not task.supports_isolation() and rampart_group.controller_id:
                # non-Ansible jobs on isolated instances run on controller
                task.instance_group = rampart_group.controller
                task.execution_node = random.choice(list(rampart_group.controller.instances.all().values_list('hostname', flat=True)))
                logger.debug('Submitting isolated {} to queue {} on node {}.'.format(
                             task.log_format, task.instance_group.name, task.execution_node))
            elif controller_node:
                task.instance_group = rampart_group
                task.execution_node = instance.hostname
                task.controller_node = controller_node
                logger.debug('Submitting isolated {} to queue {} controlled by {}.'.format(
                             task.log_format, task.execution_node, controller_node))
            elif rampart_group.is_containerized:
                # find one real, non-containerized instance with capacity to
                # act as the controller for k8s API interaction
                match = None
                for group in InstanceGroup.objects.all():
                    if group.is_containerized or group.controller_id:
                        continue
                    match = group.fit_task_to_most_remaining_capacity_instance(task)
                    if match:
                        break
                task.instance_group = rampart_group
                if match is None:
                    logger.warning(
                        'No available capacity to run containerized <{}>.'.format(task.log_format)
                    )
                else:
                    if task.supports_isolation():
                        task.controller_node = match.hostname
                    else:
                        # project updates and inventory updates don't *actually* run in pods,
                        # so just pick *any* non-isolated, non-containerized host and use it
                        # as the execution node
                        task.execution_node = match.hostname
                        logger.debug('Submitting containerized {} to queue {}.'.format(
                                     task.log_format, task.execution_node))
            else:
                task.instance_group = rampart_group
                if instance is not None:
                    task.execution_node = instance.hostname
                logger.debug('Submitting {} to <instance group, instance> <{},{}>.'.format(
                             task.log_format, task.instance_group_id, task.execution_node))
            with disable_activity_stream():
                task.celery_task_id = str(uuid.uuid4())
                task.save()

            if rampart_group is not None:
                self.consume_capacity(task, rampart_group.name)

        def post_commit():
            if task.status != 'failed' and type(task) is not WorkflowJob:
                task_cls = task._get_task_class()
                task_cls.apply_async(
                    [task.pk],
                    opts,
                    queue=task.get_queue_name(),
                    uuid=task.celery_task_id,
                    callbacks=[{
                        'task': handle_work_success.name,
                        'kwargs': {'task_actual': task_actual}
                    }],
                    errbacks=[{
                        'task': handle_work_error.name,
                        'args': [task.celery_task_id],
                        'kwargs': {'subtasks': [task_actual] + dependencies}
                    }],
                )

        task.websocket_emit_status(task.status)  # adds to on_commit
        connection.on_commit(post_commit)
Code Example #10
File: task_manager.py  Project: ws-projects/awx
    def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
        from awx.main.tasks import handle_work_error, handle_work_success

        dependent_tasks = dependent_tasks or []

        task_actual = {
            'type': get_type_for_model(type(task)),
            'id': task.id,
        }
        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]

        controller_node = None
        if task.supports_isolation() and rampart_group.controller_id:
            try:
                controller_node = rampart_group.choose_online_controller_node()
            except IndexError:
                logger.debug(six.text_type("No controllers available in group {} to run {}").format(
                             rampart_group.name, task.log_format))
                return

        task.status = 'waiting'

        (start_status, opts) = task.pre_start()
        if not start_status:
            task.status = 'failed'
            if task.job_explanation:
                task.job_explanation += ' '
            task.job_explanation += 'Task failed pre-start check.'
            task.save()
            # TODO: run error handler to fail sub-tasks and send notifications
        else:
            if type(task) is WorkflowJob:
                task.status = 'running'
                logger.info('Transitioning %s to running status.', task.log_format)
            elif not task.supports_isolation() and rampart_group.controller_id:
                # non-Ansible jobs on isolated instances run on controller
                task.instance_group = rampart_group.controller
                task.execution_node = random.choice(list(rampart_group.controller.instances.all().values_list('hostname', flat=True)))
                logger.info(six.text_type('Submitting isolated {} to queue {} on node {}.').format(
                            task.log_format, task.instance_group.name, task.execution_node))
            elif controller_node:
                task.instance_group = rampart_group
                task.execution_node = instance.hostname
                task.controller_node = controller_node
                logger.info(six.text_type('Submitting isolated {} to queue {} controlled by {}.').format(
                            task.log_format, task.execution_node, controller_node))
            else:
                task.instance_group = rampart_group
                if instance is not None:
                    task.execution_node = instance.hostname
                logger.info(six.text_type('Submitting {} to <instance group, instance> <{},{}>.').format(
                            task.log_format, task.instance_group_id, task.execution_node))
            with disable_activity_stream():
                task.celery_task_id = str(uuid.uuid4())
                task.save()

            if rampart_group is not None:
                self.consume_capacity(task, rampart_group.name)

        def post_commit():
            task.websocket_emit_status(task.status)
            if task.status != 'failed' and type(task) is not WorkflowJob:
                task_cls = task._get_task_class()
                task_cls.apply_async(
                    [task.pk],
                    opts,
                    queue=task.get_queue_name(),
                    uuid=task.celery_task_id,
                    callbacks=[{
                        'task': handle_work_success.name,
                        'kwargs': {'task_actual': task_actual}
                    }],
                    errbacks=[{
                        'task': handle_work_error.name,
                        'args': [task.celery_task_id],
                        'kwargs': {'subtasks': [task_actual] + dependencies}
                    }],
                )

        connection.on_commit(post_commit)
Code Example #11
    def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
        self.start_task_limit -= 1
        if self.start_task_limit == 0:
            # schedule another run immediately after this task manager
            schedule_task_manager()
        from awx.main.tasks import handle_work_error, handle_work_success

        dependent_tasks = dependent_tasks or []

        task_actual = {
            'type': get_type_for_model(type(task)),
            'id': task.id,
        }
        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]

        task.status = 'waiting'

        (start_status, opts) = task.pre_start()
        if not start_status:
            task.status = 'failed'
            if task.job_explanation:
                task.job_explanation += ' '
            task.job_explanation += 'Task failed pre-start check.'
            task.save()
            # TODO: run error handler to fail sub-tasks and send notifications
        else:
            if type(task) is WorkflowJob:
                task.status = 'running'
                task.send_notification_templates('running')
                logger.debug('Transitioning %s to running status.', task.log_format)
                schedule_task_manager()
            elif rampart_group.is_container_group:
                # find one real, non-containerized instance with capacity to
                # act as the controller for k8s API interaction
                match = None
                for group in InstanceGroup.objects.filter(is_container_group=False):
                    match = group.fit_task_to_most_remaining_capacity_instance(task, group.instances.all())
                    if match:
                        break
                task.instance_group = rampart_group
                if match is None:
                    logger.warning('No available capacity to run containerized <{}>.'.format(task.log_format))
                elif task.can_run_containerized and any(ig.is_container_group for ig in task.preferred_instance_groups):
                    task.controller_node = match.hostname
                else:
                    # project updates and inventory updates don't *actually* run in pods, so
                    # just pick *any* non-containerized host and use it as the execution node
                    task.execution_node = match.hostname
                    logger.debug('Submitting containerized {} to queue {}.'.format(task.log_format, task.execution_node))
            else:
                task.instance_group = rampart_group
                if instance is not None:
                    task.execution_node = instance.hostname
                logger.debug('Submitting {} to <instance group, instance> <{},{}>.'.format(task.log_format, task.instance_group_id, task.execution_node))
            with disable_activity_stream():
                task.celery_task_id = str(uuid.uuid4())
                task.save()
                task.log_lifecycle("waiting")

            if rampart_group is not None:
                self.consume_capacity(task, rampart_group.name)

        def post_commit():
            if task.status != 'failed' and type(task) is not WorkflowJob:
                task_cls = task._get_task_class()
                task_cls.apply_async(
                    [task.pk],
                    opts,
                    queue=task.get_queue_name(),
                    uuid=task.celery_task_id,
                    callbacks=[{'task': handle_work_success.name, 'kwargs': {'task_actual': task_actual}}],
                    errbacks=[{'task': handle_work_error.name, 'args': [task.celery_task_id], 'kwargs': {'subtasks': [task_actual] + dependencies}}],
                )

        task.websocket_emit_status(task.status)  # adds to on_commit
        connection.on_commit(post_commit)
Code Example #12
File: task_manager.py  Project: john-westcott-iv/awx
    def start_task(self,
                   task,
                   instance_group,
                   dependent_tasks=None,
                   instance=None):
        self.dependency_graph.add_job(task)
        self.subsystem_metrics.inc(f"{self.prefix}_tasks_started", 1)
        self.start_task_limit -= 1
        if self.start_task_limit == 0:
            # schedule another run immediately after this task manager
            ScheduleTaskManager().schedule()
        from awx.main.tasks.system import handle_work_error, handle_work_success

        # update capacity for control node and execution node
        if task.controller_node:
            self.instances[task.controller_node].consume_capacity(
                settings.AWX_CONTROL_NODE_TASK_IMPACT)
        if task.execution_node:
            self.instances[task.execution_node].consume_capacity(
                task.task_impact)

        dependent_tasks = dependent_tasks or []

        task_actual = {
            'type': get_type_for_model(type(task)),
            'id': task.id,
        }
        dependencies = [{
            'type': get_type_for_model(type(t)),
            'id': t.id
        } for t in dependent_tasks]

        task.status = 'waiting'

        (start_status, opts) = task.pre_start()
        if not start_status:
            task.status = 'failed'
            if task.job_explanation:
                task.job_explanation += ' '
            task.job_explanation += 'Task failed pre-start check.'
            task.save()
            # TODO: run error handler to fail sub-tasks and send notifications
        else:
            if type(task) is WorkflowJob:
                task.status = 'running'
                task.send_notification_templates('running')
                logger.debug('Transitioning %s to running status.',
                             task.log_format)
                # Call this to ensure Workflow nodes get spawned in timely manner
                ScheduleWorkflowManager().schedule()
            # at this point we already have control/execution nodes selected for the following cases
            else:
                task.instance_group = instance_group
                execution_node_msg = f' and execution node {task.execution_node}' if task.execution_node else ''
                logger.debug(
                    f'Submitting job {task.log_format} controlled by {task.controller_node} to instance group {instance_group.name}{execution_node_msg}.'
                )
            with disable_activity_stream():
                task.celery_task_id = str(uuid.uuid4())
                task.save()
                task.log_lifecycle("waiting")

        # apply_async does a NOTIFY on the channel the dispatcher is listening to;
        # postgres treats this as part of the transaction, which is what we want
        if task.status != 'failed' and type(task) is not WorkflowJob:
            task_cls = task._get_task_class()
            task_cls.apply_async(
                [task.pk],
                opts,
                queue=task.get_queue_name(),
                uuid=task.celery_task_id,
                callbacks=[{
                    'task': handle_work_success.name,
                    'kwargs': {
                        'task_actual': task_actual
                    }
                }],
                errbacks=[{
                    'task': handle_work_error.name,
                    'args': [task.celery_task_id],
                    'kwargs': {
                        'subtasks': [task_actual] + dependencies
                    }
                }],
            )

        # In exceptional cases, like a job failing its pre-start checks, we send the websocket status message here.
        # For jobs going into waiting we omit it for performance reasons, since they should transition to running quickly.
        if task.status != 'waiting':
            task.websocket_emit_status(task.status)  # adds to on_commit
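
The NOTIFY comment above relies on a PostgreSQL guarantee: notifications issued inside a transaction are queued and delivered to listeners only if and when that transaction commits. A stand-alone illustration with psycopg2 (the DSN and channel name are placeholders):

import psycopg2

conn = psycopg2.connect('dbname=awx')  # placeholder DSN
with conn.cursor() as cur:
    # Issued inside the implicit transaction: queued, not yet delivered.
    cur.execute("NOTIFY dispatch_channel, 'run task 42'")
conn.commit()  # listeners receive the notification only at COMMIT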