Example no. 1
    def test_related_object(self):
        """Test that the `related_object` property is computed properly."""
        # Case 1: Job, identified by class_path.
        with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
                                                  "extras/tests/dummy_jobs")):
            job_class = get_job("local/test_pass/TestPass")
            job_result = JobResult(
                name=job_class.class_path,
                obj_type=ContentType.objects.get(app_label="extras",
                                                 model="job"),
                job_id=uuid.uuid4(),
            )

            # Can't just do self.assertEqual(job_result.related_object, job_class) here for some reason
            self.assertEqual(type(job_result.related_object), type)
            self.assertTrue(issubclass(job_result.related_object, Job))
            self.assertEqual(job_result.related_object.class_path,
                             "local/test_pass/TestPass")

            job_result.name = "local/no_such_job/NoSuchJob"
            self.assertIsNone(job_result.related_object)

            job_result.name = "not-a-class-path"
            self.assertIsNone(job_result.related_object)

        # Case 2: GitRepository, identified by name.
        repo = GitRepository(
            name="Test Git Repository",
            slug="test-git-repo",
            remote_url="http://localhost/git.git",
            username="******",
        )
        repo.save(trigger_resync=False)

        job_result = JobResult(
            name=repo.name,
            obj_type=ContentType.objects.get_for_model(repo),
            job_id=uuid.uuid4(),
        )

        self.assertEqual(job_result.related_object, repo)

        job_result.name = "No such GitRepository"
        self.assertIsNone(job_result.related_object)

        # Case 3: Related object with no name, identified by PK/ID
        ip_address = IPAddress.objects.create(address="1.1.1.1/32")
        job_result = JobResult(
            name="irrelevant",
            obj_type=ContentType.objects.get_for_model(ip_address),
            job_id=ip_address.pk,
        )

        self.assertEqual(job_result.related_object, ip_address)

        job_result.job_id = uuid.uuid4()
        self.assertIsNone(job_result.related_object)
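
For reference, a related_object property along the following lines would satisfy all three cases exercised above. This is a sketch inferred from the assertions in the test, not necessarily the exact implementation:

    # Sketch of a JobResult.related_object property consistent with the test above.
    @property
    def related_object(self):
        from nautobot.extras.jobs import get_job  # local import to avoid a circular dependency

        if self.obj_type == ContentType.objects.get(app_label="extras", model="job"):
            # Case 1: a Job class, identified by the class_path stored in `name`.
            # get_job() returns None for an unknown or malformed class_path.
            return get_job(self.name)

        model_class = self.obj_type.model_class()

        if hasattr(model_class, "name"):
            # Case 2: models with a `name` field (e.g. GitRepository) are matched by name.
            try:
                return model_class.objects.get(name=self.name)
            except model_class.DoesNotExist:
                pass

        # Case 3: otherwise fall back to the primary key stored in `job_id`.
        try:
            return model_class.objects.get(pk=self.job_id)
        except model_class.DoesNotExist:
            return None
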
Example no. 2
def enqueue_pull_git_repository_and_refresh_data(repository, request):
    """
    Convenience wrapper for JobResult.enqueue_job() to enqueue the pull_git_repository_and_refresh_data job.
    """
    git_repository_content_type = ContentType.objects.get_for_model(GitRepository)
    JobResult.enqueue_job(
        pull_git_repository_and_refresh_data,
        repository.name,
        git_repository_content_type,
        request.user,
        repository_pk=repository.pk,
        request=copy_safe_request(request),
    )
Example no. 3
def enqueue_git_repository_helper(repository, request, func, **kwargs):
    """
    Wrapper for JobResult.enqueue_job() to enqueue one of several possible Git repository functions.
    """
    git_repository_content_type = ContentType.objects.get_for_model(
        GitRepository)
    JobResult.enqueue_job(
        func,
        repository.name,
        git_repository_content_type,
        request.user,
        repository_pk=repository.pk,
        request=copy_safe_request(request),
    )
Example no. 4
    def dry_run(self, request, pk):
        if not request.user.has_perm("extras.run_job"):
            raise PermissionDenied()

        scheduled_job = get_object_or_404(ScheduledJob, pk=pk)
        job_class = get_job(scheduled_job.job_class)
        if job_class is None:
            raise Http404
        job = job_class()
        grouping, module, class_name = job_class.class_path.split("/", 2)

        # Immediately enqueue the job with commit=False
        job_content_type = ContentType.objects.get(app_label="extras",
                                                   model="job")
        job_result = JobResult.enqueue_job(
            run_job,
            job.class_path,
            job_content_type,
            scheduled_job.user,
            data=scheduled_job.kwargs["data"],
            request=copy_safe_request(request),
            commit=False,  # force a dry-run
        )
        serializer = serializers.JobResultSerializer(
            job_result, context={"request": request})

        return Response(serializer.data)
Example no. 5
    def run(self, request, class_path):
        if not request.user.has_perm("extras.run_job"):
            raise PermissionDenied("This user does not have permission to run jobs.")

        job_class = self._get_job_class(class_path)
        job = job_class()

        input_serializer = serializers.JobInputSerializer(data=request.data)
        input_serializer.is_valid(raise_exception=True)

        data = input_serializer.data["data"]
        commit = input_serializer.data["commit"]
        if commit is None:
            commit = getattr(job_class.Meta, "commit_default", True)

        job_content_type = ContentType.objects.get(app_label="extras", model="job")

        job_result = JobResult.enqueue_job(
            run_job,
            job.class_path,
            job_content_type,
            request.user,
            data=data,
            request=copy_safe_request(request),
            commit=commit,
        )
        job.result = job_result

        serializer = serializers.JobDetailSerializer(job, context={"request": request})

        return Response(serializer.data)
Example no. 6
def scheduled_job_handler(*args, **kwargs):
    """
    A thin wrapper around JobResult.enqueue_job() that allows for it to be called as an async task
    for the purposes of enqueuing scheduled jobs at their recurring intervals. Thus, JobResult.enqueue_job()
    is responsible for enqueuing the actual job for execution and this method is the task executed
    by the scheduler to kick off the job execution on a recurring interval.
    """
    from nautobot.extras.models import JobResult  # avoid circular import

    user_pk = kwargs.pop("user")
    user = User.objects.get(pk=user_pk)
    name = kwargs.pop("name")
    scheduled_job_pk = kwargs.pop("scheduled_job_pk")
    schedule = ScheduledJob.objects.get(pk=scheduled_job_pk)

    job_content_type = ContentType.objects.get(app_label="extras", model="job")
    JobResult.enqueue_job(run_job, name, job_content_type, user, schedule=schedule, **kwargs)
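
Based on the kwargs.pop() calls above, the scheduler is expected to invoke this handler with at least the following keyword arguments. All values below are illustrative placeholders; any remaining kwargs (such as data and commit) are passed straight through to JobResult.enqueue_job():

scheduled_job_handler(
    user="6f9619ff-8b86-d011-b42d-00c04fc964ff",  # primary key of the User to run the job as
    name="local/example_module/ExampleJob",  # job class_path, reused as the JobResult name
    scheduled_job_pk="16fd2706-8baf-433b-82eb-8c7fada847da",  # looks up the ScheduledJob record
    data={"some_var": "value"},  # forwarded to run_job via enqueue_job()
    commit=True,  # forwarded to run_job via enqueue_job()
)
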
Example no. 7
    def run(self, request, class_path):
        if not request.user.has_perm("extras.run_job"):
            raise PermissionDenied(
                "This user does not have permission to run jobs.")

        job_class = self._get_job_class(class_path)
        job = job_class()

        input_serializer = serializers.JobInputSerializer(data=request.data)
        input_serializer.is_valid(raise_exception=True)

        data = input_serializer.data["data"] or {}
        commit = input_serializer.data["commit"]
        if commit is None:
            commit = getattr(job_class.Meta, "commit_default", True)

        try:
            job.validate_data(data)
        except FormsValidationError as e:
            # message_dict can only be accessed if ValidationError got a dict
            # in the constructor (saved as error_dict). Otherwise we get a list
            # of errors under messages
            return Response(
                {"errors": e.message_dict if hasattr(e, "error_dict") else e.messages},
                status=400,
            )

        if not get_worker_count():
            raise CeleryWorkerNotRunningException()

        job_content_type = ContentType.objects.get(app_label="extras",
                                                   model="job")

        schedule = input_serializer.data.get("schedule")
        if schedule:
            schedule = self._create_schedule(schedule, data, commit, job,
                                             job_class, request)
        else:
            job_result = JobResult.enqueue_job(
                run_job,
                job.class_path,
                job_content_type,
                request.user,
                data=data,
                request=copy_safe_request(request),
                commit=commit,
            )
            job.result = job_result

        serializer = serializers.JobDetailSerializer(
            job, context={"request": request})

        return Response(serializer.data)
Example no. 8
    def setUp(self):
        self.user = User.objects.create_user(username="******")
        self.factory = RequestFactory()
        self.dummy_request = self.factory.get("/no-op/")
        self.dummy_request.user = self.user
        # Needed for use with the change_logging decorator
        self.dummy_request.id = uuid.uuid4()

        self.site = Site.objects.create(name="Test Site", slug="test-site")
        self.manufacturer = Manufacturer.objects.create(name="Acme",
                                                        slug="acme")
        self.device_type = DeviceType.objects.create(
            manufacturer=self.manufacturer,
            model="Frobozz 1000",
            slug="frobozz1000")
        self.role = DeviceRole.objects.create(name="router", slug="router")
        self.device_status = Status.objects.get_for_model(Device).get(
            slug="active")
        self.device = Device.objects.create(
            name="test-device",
            device_role=self.role,
            device_type=self.device_type,
            site=self.site,
            status=self.device_status,
        )

        self.repo = GitRepository(
            name="Test Git Repository",
            slug="test_git_repo",
            remote_url="http://localhost/git.git",
            # Provide everything we know we can provide
            provided_contents=[
                entry.content_identifier
                for entry in get_datasource_contents("extras.gitrepository")
            ],
        )
        self.repo.save(trigger_resync=False)

        self.job_result = JobResult(
            name=self.repo.name,
            obj_type=ContentType.objects.get_for_model(GitRepository),
            job_id=uuid.uuid4(),
        )
Example no. 9
    def run(self, request, class_path):
        if not request.user.has_perm("extras.run_job"):
            raise PermissionDenied(
                "This user does not have permission to run jobs.")

        # Check that at least one RQ worker is running
        if not Worker.count(get_connection("default")):
            raise RQWorkerNotRunningException()

        job_class = self._get_job_class(class_path)
        job = job_class()
        input_serializer = serializers.JobInputSerializer(data=request.data)

        if not input_serializer.is_valid():
            return Response(input_serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)

        data = input_serializer.data["data"]
        commit = input_serializer.data["commit"]
        if commit is None:
            commit = getattr(job_class.Meta, "commit_default", True)

        job_content_type = ContentType.objects.get(app_label="extras",
                                                   model="job")

        job_result = JobResult.enqueue_job(
            run_job,
            job.class_path,
            job_content_type,
            request.user,
            data=data,
            request=copy_safe_request(request),
            commit=commit,
        )
        job.result = job_result

        serializer = serializers.JobDetailSerializer(
            job, context={"request": request})

        return Response(serializer.data)
Example no. 10
    def dry_run(self, request, pk):
        scheduled_job = get_object_or_404(ScheduledJob, pk=pk)
        job_model = scheduled_job.job_model
        if job_model is None or not job_model.runnable:
            raise MethodNotAllowed("This job cannot be dry-run at this time.")
        if not Job.objects.check_perms(request.user, instance=job_model, action="run"):
            raise PermissionDenied("You do not have permission to run this job.")

        # Immediately enqueue the job with commit=False
        job_content_type = get_job_content_type()
        job_result = JobResult.enqueue_job(
            run_job,
            job_model.class_path,
            job_content_type,
            request.user,
            data=scheduled_job.kwargs.get("data", {}),
            request=copy_safe_request(request),
            commit=False,  # force a dry-run
        )
        serializer = serializers.JobResultSerializer(job_result, context={"request": request})

        return Response(serializer.data)
Example no. 11
    def handle(self, *args, **options):
        if "/" not in options["job"]:
            raise CommandError(
                'Job must be specified in the form "grouping_name/module_name/JobClassName"'
            )
        job_class = get_job(options["job"])
        if not job_class:
            raise CommandError('Job "%s" not found' % options["job"])

        job_content_type = ContentType.objects.get(app_label="extras",
                                                   model="job")

        # Run the job and create a new JobResult
        self.stdout.write("[{:%H:%M:%S}] Running {}...".format(
            timezone.now(), job_class.class_path))

        job_result = JobResult.enqueue_job(
            run_job,
            job_class.class_path,
            job_content_type,
            None,
            data={},  # TODO: parsing CLI args into a data dictionary is not currently implemented
            request=None,
            commit=options["commit"],
        )

        # Wait on the job to finish
        while job_result.status not in JobResultStatusChoices.TERMINAL_STATE_CHOICES:
            time.sleep(1)
            job_result = JobResult.objects.get(pk=job_result.pk)

        # Report on success/failure
        for test_name, attrs in job_result.data.items():

            if test_name in ["total", "output"]:
                continue

            self.stdout.write(
                "\t{}: {} success, {} info, {} warning, {} failure".format(
                    test_name,
                    attrs["success"],
                    attrs["info"],
                    attrs["warning"],
                    attrs["failure"],
                ))

            for log_entry in attrs["log"]:
                status = log_entry[1]
                if status == "success":
                    status = self.style.SUCCESS(status)
                elif status == "info":
                    status = status
                elif status == "warning":
                    status = self.style.WARNING(status)
                elif status == "failure":
                    status = self.style.NOTICE(status)

                if log_entry[2]:  # object associated with log entry
                    self.stdout.write(
                        f"\t\t{status}: {log_entry[2]}: {log_entry[-1]}")
                else:
                    self.stdout.write(f"\t\t{status}: {log_entry[-1]}")

        if job_result.data["output"]:
            self.stdout.write(job_result.data["output"])

        if job_result.status == JobResultStatusChoices.STATUS_FAILED:
            status = self.style.ERROR("FAILED")
        elif job_result.status == JobResultStatusChoices.STATUS_ERRORED:
            status = self.style.ERROR("ERRORED")
        else:
            status = self.style.SUCCESS("SUCCESS")
        self.stdout.write("[{:%H:%M:%S}] {}: {}".format(
            timezone.now(), job_class.class_path, status))

        # Wrap things up
        self.stdout.write("[{:%H:%M:%S}] {}: Duration {}".format(
            timezone.now(), job_class.class_path, job_result.duration))
        self.stdout.write("[{:%H:%M:%S}] Finished".format(timezone.now()))
Example no. 12
def _run_job(request, job_model, legacy_response=False):
    """An internal function providing logic shared between JobModelViewSet.run() and JobViewSet.run()."""
    if not request.user.has_perm("extras.run_job"):
        raise PermissionDenied("This user does not have permission to run jobs.")
    if not job_model.enabled:
        raise PermissionDenied("This job is not enabled to be run.")
    if not job_model.installed:
        raise MethodNotAllowed(request.method, detail="This job is not presently installed and cannot be run")

    job_class = job_model.job_class
    if job_class is None:
        raise MethodNotAllowed(request.method, detail="This job's source code could not be located and cannot be run")
    job = job_class()

    input_serializer = serializers.JobInputSerializer(data=request.data)
    input_serializer.is_valid(raise_exception=True)

    data = input_serializer.data["data"] or {}
    commit = input_serializer.data["commit"]
    if commit is None:
        commit = job_model.commit_default

    try:
        job.validate_data(data)
    except FormsValidationError as e:
        # message_dict can only be accessed if ValidationError got a dict
        # in the constructor (saved as error_dict). Otherwise we get a list
        # of errors under messages
        return Response({"errors": e.message_dict if hasattr(e, "error_dict") else e.messages}, status=400)

    if not get_worker_count():
        raise CeleryWorkerNotRunningException()

    job_content_type = get_job_content_type()
    schedule_data = input_serializer.data.get("schedule")

    # Default to a null JobResult.
    job_result = None

    # Assert that a job with `approval_required=True` has a schedule that enforces approval and
    # executes immediately.
    if schedule_data is None and job_model.approval_required:
        schedule_data = {"interval": JobExecutionType.TYPE_IMMEDIATELY}

    # Try to create a ScheduledJob, or...
    if schedule_data:
        schedule = _create_schedule(schedule_data, data, commit, job, job_model, request)
    else:
        schedule = None

    # ... If we can't create one, create a JobResult instead.
    if schedule is None:
        job_result = JobResult.enqueue_job(
            run_job,
            job.class_path,
            job_content_type,
            request.user,
            data=data,
            request=copy_safe_request(request),
            commit=commit,
        )
        job.result = job_result

    if legacy_response:
        # Old-style JobViewSet response - serialize the Job class in the response for some reason?
        serializer = serializers.JobClassDetailSerializer(job, context={"request": request})
        return Response(serializer.data)
    else:
        # New-style JobModelViewSet response - serialize the schedule or job_result as appropriate
        data = {"schedule": None, "job_result": None}
        if schedule:
            data["schedule"] = nested_serializers.NestedScheduledJobSerializer(
                schedule, context={"request": request}
            ).data
        if job_result:
            data["job_result"] = nested_serializers.NestedJobResultSerializer(
                job_result, context={"request": request}
            ).data
        return Response(data, status=status.HTTP_201_CREATED)
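
For illustration, a client might invoke the corresponding REST endpoint roughly as follows. This is a sketch assuming the standard /api/extras/jobs/<class_path>/run/ route and token authentication; the hostname, class_path, token, and payload values are placeholders:

import requests

response = requests.post(
    "https://nautobot.example.com/api/extras/jobs/local/example_module/ExampleJob/run/",
    headers={"Authorization": "Token 0123456789abcdef0123456789abcdef01234567"},
    json={"data": {"some_var": "value"}, "commit": True},
)
response.raise_for_status()
payload = response.json()
# New-style (JobModelViewSet) responses return HTTP 201 with {"schedule": ..., "job_result": ...};
# exactly one of the two is populated, depending on whether a schedule was requested.
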
Example no. 13
    def test_pull_git_repository_and_refresh_data_with_valid_data(
            self, MockGitRepo):
        """
        The test_pull_git_repository_and_refresh_data job should succeed if valid data is present in the repo.
        """
        with tempfile.TemporaryDirectory() as tempdir:
            with self.settings(GIT_ROOT=tempdir):

                def populate_repo(path, url):
                    os.makedirs(path)
                    # Just make config_contexts and export_templates directories as we don't load jobs
                    os.makedirs(os.path.join(path, "config_contexts"))
                    os.makedirs(
                        os.path.join(path, "config_contexts", "devices"))
                    os.makedirs(
                        os.path.join(path, "export_templates", "dcim",
                                     "device"))
                    with open(
                            os.path.join(path, "config_contexts",
                                         "context.yaml"), "w") as fd:
                        yaml.dump(
                            {
                                "_metadata": {
                                    "name": "Region NYC servers",
                                    "weight": 1500,
                                    "description":
                                    "NTP servers for region NYC",
                                    "is_active": True,
                                },
                                "ntp-servers":
                                ["172.16.10.22", "172.16.10.33"],
                            },
                            fd,
                        )
                    with open(
                            os.path.join(path, "config_contexts", "devices",
                                         "test-device.json"),
                            "w",
                    ) as fd:
                        json.dump({"dns-servers": ["8.8.8.8"]}, fd)
                    with open(
                            os.path.join(path, "export_templates", "dcim",
                                         "device", "template.j2"),
                            "w",
                    ) as fd:
                        fd.write(
                            "{% for device in queryset %}\n{{ device.name }}\n{% endfor %}"
                        )
                    return mock.DEFAULT

                MockGitRepo.side_effect = populate_repo
                MockGitRepo.return_value.checkout.return_value = self.COMMIT_HEXSHA

                pull_git_repository_and_refresh_data(self.repo.pk,
                                                     self.dummy_request,
                                                     self.job_result)

                self.assertEqual(
                    self.job_result.status,
                    JobResultStatusChoices.STATUS_COMPLETED,
                    self.job_result.data,
                )

                # Make sure ConfigContext was successfully loaded from file
                config_context = ConfigContext.objects.get(
                    name="Region NYC servers",
                    owner_object_id=self.repo.pk,
                    owner_content_type=ContentType.objects.get_for_model(
                        GitRepository),
                )
                self.assertIsNotNone(config_context)
                self.assertEqual(1500, config_context.weight)
                self.assertEqual("NTP servers for region NYC",
                                 config_context.description)
                self.assertTrue(config_context.is_active)
                self.assertEqual(
                    {"ntp-servers": ["172.16.10.22", "172.16.10.33"]},
                    config_context.data,
                )

                # Make sure Device local config context was successfully populated from file
                device = Device.objects.get(name=self.device.name)
                self.assertIsNotNone(device.local_context_data)
                self.assertEqual({"dns-servers": ["8.8.8.8"]},
                                 device.local_context_data)
                self.assertEqual(device.local_context_data_owner, self.repo)

                # Make sure ExportTemplate was successfully loaded from file
                export_template = ExportTemplate.objects.get(
                    owner_object_id=self.repo.pk,
                    owner_content_type=ContentType.objects.get_for_model(
                        GitRepository),
                    content_type=ContentType.objects.get_for_model(Device),
                    name="template.j2",
                )
                self.assertIsNotNone(export_template)

                # Now "resync" the repository, but now those files no longer exist in the repository
                def empty_repo(path, url):
                    os.remove(
                        os.path.join(path, "config_contexts", "context.yaml"))
                    os.remove(
                        os.path.join(path, "config_contexts", "devices",
                                     "test-device.json"))
                    os.remove(
                        os.path.join(path, "export_templates", "dcim",
                                     "device", "template.j2"))
                    return mock.DEFAULT

                MockGitRepo.side_effect = empty_repo
                # For verisimilitude, don't re-use the old request and job_result
                self.dummy_request.id = uuid.uuid4()
                self.job_result = JobResult(
                    name=self.repo.name,
                    obj_type=ContentType.objects.get_for_model(GitRepository),
                    job_id=uuid.uuid4(),
                )

                pull_git_repository_and_refresh_data(self.repo.pk,
                                                     self.dummy_request,
                                                     self.job_result)

                self.assertEqual(
                    self.job_result.status,
                    JobResultStatusChoices.STATUS_COMPLETED,
                    self.job_result.data,
                )

                # Verify that objects have been removed from the database
                self.assertEqual(
                    [],
                    list(
                        ConfigContext.objects.filter(
                            owner_content_type=ContentType.objects.get_for_model(GitRepository),
                            owner_object_id=self.repo.pk,
                        )),
                )
                self.assertEqual(
                    [],
                    list(
                        ExportTemplate.objects.filter(
                            owner_content_type=ContentType.objects.get_for_model(GitRepository),
                            owner_object_id=self.repo.pk,
                        )),
                )
                device = Device.objects.get(name=self.device.name)
                self.assertIsNone(device.local_context_data)
                self.assertIsNone(device.local_context_data_owner)
Example no. 14
def job_runner(handle_class, job_class, device=None, user=None):
    """Function to make management command code more DRY."""
    data = {}

    if device:
        data["device"] = Device.objects.get(name=device)

    request = RequestFactory().request(SERVER_NAME="WebRequestContext")
    request.id = uuid.uuid4()
    request.user = User.objects.get(username=user)

    job_content_type = ContentType.objects.get(app_label="extras", model="job")

    # Run the job and create a new JobResult
    handle_class.stdout.write("[{:%H:%M:%S}] Running {}...".format(
        timezone.now(), job_class.class_path))

    job_result = JobResult.enqueue_job(
        run_job,
        job_class.class_path,
        job_content_type,
        request.user,
        data=data,
        request=request,
        commit=True,
    )

    # Wait on the job to finish
    while job_result.status not in JobResultStatusChoices.TERMINAL_STATE_CHOICES:
        time.sleep(1)
        job_result = JobResult.objects.get(pk=job_result.pk)

    # Report on success/failure
    for test_name, attrs in job_result.data.items():

        if test_name in ["total", "output"]:
            continue

        handle_class.stdout.write(
            "\t{}: {} success, {} info, {} warning, {} failure".format(
                test_name,
                attrs["success"],
                attrs["info"],
                attrs["warning"],
                attrs["failure"],
            ))

        for log_entry in attrs["log"]:
            status = log_entry[1]
            if status == "success":
                status = handle_class.style.SUCCESS(status)
            elif status == "info":
                status = status  # pylint: disable=self-assigning-variable
            elif status == "warning":
                status = handle_class.style.WARNING(status)
            elif status == "failure":
                status = handle_class.style.NOTICE(status)

            if log_entry[2]:  # object associated with log entry
                handle_class.stdout.write(
                    f"\t\t{status}: {log_entry[2]}: {log_entry[-1]}")
            else:
                handle_class.stdout.write(f"\t\t{status}: {log_entry[-1]}")

    if job_result.data["output"]:
        handle_class.stdout.write(job_result.data["output"])

    if job_result.status == JobResultStatusChoices.STATUS_FAILED:
        status = handle_class.style.ERROR("FAILED")
    elif job_result.status == JobResultStatusChoices.STATUS_ERRORED:
        status = handle_class.style.ERROR("ERRORED")
    else:
        status = handle_class.style.SUCCESS("SUCCESS")
    handle_class.stdout.write("[{:%H:%M:%S}] {}: {}".format(
        timezone.now(), job_class.class_path, status))

    # Wrap things up
    handle_class.stdout.write("[{:%H:%M:%S}] {}: Duration {}".format(
        timezone.now(), job_class.class_path, job_result.duration))
    handle_class.stdout.write("[{:%H:%M:%S}] Finished".format(timezone.now()))
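
For context, a management command built on this helper might look roughly like the following. The command class, its options, and the job class_path are hypothetical; job_runner() is the function defined above:

from django.core.management.base import BaseCommand, CommandError

from nautobot.extras.jobs import get_job


class Command(BaseCommand):
    """Hypothetical command that runs a single job via the job_runner() helper above."""

    def add_arguments(self, parser):
        parser.add_argument("--device", help="Name of a device to pass to the job")
        parser.add_argument("--user", required=True, help="Username to run the job as")

    def handle(self, *args, **options):
        job_class = get_job("local/example_module/ExampleJob")  # placeholder class_path
        if job_class is None:
            raise CommandError("Job class not found")
        job_runner(self, job_class, device=options["device"], user=options["user"])
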
Example no. 15
    def handle(self, *args, **options):
        if "/" not in options["job"]:
            raise CommandError(
                'Job must be specified in the form "grouping_name/module_name/JobClassName"'
            )
        job_class = get_job(options["job"])
        if not job_class:
            raise CommandError('Job "%s" not found' % options["job"])

        user = None
        request = None
        if options["commit"] and not options["username"]:
            # Job execution with commit=True uses change_logging(), which requires a user as the author of any changes
            raise CommandError("--username is mandatory when --commit is used")

        if options["username"]:
            User = get_user_model()
            try:
                user = User.objects.get(username=options["username"])
            except User.DoesNotExist as exc:
                raise CommandError("No such user") from exc

            request = RequestFactory().request(
                SERVER_NAME="nautobot_server_runjob")
            request.id = uuid.uuid4()
            request.user = user

        job_content_type = get_job_content_type()

        # Run the job and create a new JobResult
        self.stdout.write("[{:%H:%M:%S}] Running {}...".format(
            timezone.now(), job_class.class_path))

        job_result = JobResult.enqueue_job(
            run_job,
            job_class.class_path,
            job_content_type,
            user,
            data={},  # TODO: parsing CLI args into a data dictionary is not currently implemented
            request=copy_safe_request(request) if request else None,
            commit=options["commit"],
        )

        # Wait on the job to finish
        while job_result.status not in JobResultStatusChoices.TERMINAL_STATE_CHOICES:
            time.sleep(1)
            job_result = JobResult.objects.get(pk=job_result.pk)

        # Report on success/failure
        groups = set(
            JobLogEntry.objects.filter(job_result=job_result).values_list(
                "grouping", flat=True))
        for group in sorted(groups):
            logs = JobLogEntry.objects.filter(job_result__pk=job_result.pk,
                                              grouping=group)
            success_count = logs.filter(
                log_level=LogLevelChoices.LOG_SUCCESS).count()
            info_count = logs.filter(
                log_level=LogLevelChoices.LOG_INFO).count()
            warning_count = logs.filter(
                log_level=LogLevelChoices.LOG_WARNING).count()
            failure_count = logs.filter(
                log_level=LogLevelChoices.LOG_FAILURE).count()

            self.stdout.write(
                "\t{}: {} success, {} info, {} warning, {} failure".format(
                    group,
                    success_count,
                    info_count,
                    warning_count,
                    failure_count,
                ))

            for log_entry in logs:
                status = log_entry.log_level
                if status == "success":
                    status = self.style.SUCCESS(status)
                elif status == "info":
                    status = status
                elif status == "warning":
                    status = self.style.WARNING(status)
                elif status == "failure":
                    status = self.style.NOTICE(status)

                if log_entry.log_object:
                    self.stdout.write(
                        f"\t\t{status}: {log_entry.log_object}: {log_entry.message}"
                    )
                else:
                    self.stdout.write(f"\t\t{status}: {log_entry.message}")

        if job_result.data["output"]:
            self.stdout.write(job_result.data["output"])

        if job_result.status == JobResultStatusChoices.STATUS_FAILED:
            status = self.style.ERROR("FAILED")
        elif job_result.status == JobResultStatusChoices.STATUS_ERRORED:
            status = self.style.ERROR("ERRORED")
        else:
            status = self.style.SUCCESS("SUCCESS")
        self.stdout.write("[{:%H:%M:%S}] {}: {}".format(
            timezone.now(), job_class.class_path, status))

        # Wrap things up
        self.stdout.write("[{:%H:%M:%S}] {}: Duration {}".format(
            timezone.now(), job_class.class_path, job_result.duration))
        self.stdout.write("[{:%H:%M:%S}] Finished".format(timezone.now()))
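
For reference, typical invocations based on the options handled above (assuming the command is registered as runjob under nautobot-server; the job class_path and username are placeholders):

nautobot-server runjob local/example_module/ExampleJob
nautobot-server runjob --commit --username admin local/example_module/ExampleJob

The first form runs the job as a dry-run (commit defaults to false), so no --username is required; the second commits changes and therefore must name the user to attribute them to.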