Example #1
def enqueue_pull_git_repository_and_refresh_data(repository, request):
    """
    Convenience wrapper for JobResult.enqueue_job() to enqueue the pull_git_repository_and_refresh_data job.
    """
    git_repository_content_type = ContentType.objects.get_for_model(GitRepository)
    JobResult.enqueue_job(
        pull_git_repository_and_refresh_data,
        repository.name,
        git_repository_content_type,
        request.user,
        repository_pk=repository.pk,
        request=copy_safe_request(request),
    )
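
For context, a hedged sketch of how this convenience wrapper might be invoked, for example from view code reacting to a saved GitRepository; the repository lookup and its name are placeholders, not part of the project code above:

# Hypothetical call site (placeholder repository name); reuses the models and
# request object already assumed by the wrapper above.
repository = GitRepository.objects.get(name="backbone-configs")
enqueue_pull_git_repository_and_refresh_data(repository, request)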
Example #2
def enqueue_git_repository_helper(repository, request, func, **kwargs):
    """
    Wrapper for JobResult.enqueue_job() to enqueue one of several possible Git repository functions.
    """
    git_repository_content_type = ContentType.objects.get_for_model(
        GitRepository)
    JobResult.enqueue_job(
        func,
        repository.name,
        git_repository_content_type,
        request.user,
        repository_pk=repository.pk,
        request=copy_safe_request(request),
    )
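
Example #2 generalizes Example #1 by accepting the job function as an argument. As an illustration only (this rewrite is an assumption, not project code), Example #1 could be expressed in terms of this helper:

def enqueue_pull_git_repository_and_refresh_data(repository, request):
    # Delegate to the generic helper, supplying the specific job function.
    enqueue_git_repository_helper(repository, request, pull_git_repository_and_refresh_data)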
Example #3
    def dry_run(self, request, pk):
        if not request.user.has_perm("extras.run_job"):
            raise PermissionDenied()

        scheduled_job = get_object_or_404(ScheduledJob, pk=pk)
        job_class = get_job(scheduled_job.job_class)
        if job_class is None:
            raise Http404
        job = job_class()
        grouping, module, class_name = job_class.class_path.split("/", 2)

        # Immediately enqueue the job with commit=False
        job_content_type = ContentType.objects.get(app_label="extras",
                                                   model="job")
        job_result = JobResult.enqueue_job(
            run_job,
            job.class_path,
            job_content_type,
            scheduled_job.user,
            data=scheduled_job.kwargs["data"],
            request=copy_safe_request(request),
            commit=False,  # force a dry-run
        )
        serializer = serializers.JobResultSerializer(
            job_result, context={"request": request})

        return Response(serializer.data)
Example #4
    def run(self, request, class_path):
        if not request.user.has_perm("extras.run_job"):
            raise PermissionDenied("This user does not have permission to run jobs.")

        job_class = self._get_job_class(class_path)
        job = job_class()

        input_serializer = serializers.JobInputSerializer(data=request.data)
        input_serializer.is_valid(raise_exception=True)

        data = input_serializer.data["data"]
        commit = input_serializer.data["commit"]
        if commit is None:
            commit = getattr(job_class.Meta, "commit_default", True)

        job_content_type = ContentType.objects.get(app_label="extras", model="job")

        job_result = JobResult.enqueue_job(
            run_job,
            job.class_path,
            job_content_type,
            request.user,
            data=data,
            request=copy_safe_request(request),
            commit=commit,
        )
        job.result = job_result

        serializer = serializers.JobDetailSerializer(job, context={"request": request})

        return Response(serializer.data)
Example #5
def scheduled_job_handler(*args, **kwargs):
    """
    A thin wrapper around JobResult.enqueue_job() that allows it to be called as an async task,
    so that scheduled jobs can be enqueued at their recurring intervals. JobResult.enqueue_job()
    is responsible for enqueuing the actual job for execution; this function is merely the task
    the scheduler executes to kick off that job on each recurring interval.
    """
    from nautobot.extras.models import JobResult  # avoid circular import

    user_pk = kwargs.pop("user")
    user = User.objects.get(pk=user_pk)
    name = kwargs.pop("name")
    scheduled_job_pk = kwargs.pop("scheduled_job_pk")
    schedule = ScheduledJob.objects.get(pk=scheduled_job_pk)

    job_content_type = ContentType.objects.get(app_label="extras", model="job")
    JobResult.enqueue_job(run_job, name, job_content_type, user, schedule=schedule, **kwargs)
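
Based on the keyword arguments the handler pops ("user", "name", "scheduled_job_pk") and forwards, a hedged sketch of how a scheduler task might call it; the scheduled_job lookup and the extra data kwarg are placeholders:

# Hypothetical invocation; everything not popped above is forwarded to
# JobResult.enqueue_job() unchanged.
scheduled_job = ScheduledJob.objects.first()  # placeholder lookup
scheduled_job_handler(
    user=scheduled_job.user.pk,         # popped and resolved to a User instance
    name=scheduled_job.name,            # forwarded positionally to enqueue_job
    scheduled_job_pk=scheduled_job.pk,  # resolved and passed as schedule=
    data=scheduled_job.kwargs.get("data", {}),  # assumed pass-through kwarg
)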
Example #6
    def run(self, request, class_path):
        if not request.user.has_perm("extras.run_job"):
            raise PermissionDenied(
                "This user does not have permission to run jobs.")

        job_class = self._get_job_class(class_path)
        job = job_class()

        input_serializer = serializers.JobInputSerializer(data=request.data)
        input_serializer.is_valid(raise_exception=True)

        data = input_serializer.data["data"] or {}
        commit = input_serializer.data["commit"]
        if commit is None:
            commit = getattr(job_class.Meta, "commit_default", True)

        try:
            job.validate_data(data)
        except FormsValidationError as e:
            # message_dict is only available when the ValidationError was
            # constructed with a dict (stored as error_dict); otherwise the
            # errors are a plain list under messages.
            return Response(
                {"errors": e.message_dict if hasattr(e, "error_dict") else e.messages},
                status=400,
            )

        if not get_worker_count():
            raise CeleryWorkerNotRunningException()

        job_content_type = ContentType.objects.get(app_label="extras",
                                                   model="job")

        schedule = input_serializer.data.get("schedule")
        if schedule:
            schedule = self._create_schedule(schedule, data, commit, job,
                                             job_class, request)
        else:
            job_result = JobResult.enqueue_job(
                run_job,
                job.class_path,
                job_content_type,
                request.user,
                data=data,
                request=copy_safe_request(request),
                commit=commit,
            )
            job.result = job_result

        serializer = serializers.JobDetailSerializer(
            job, context={"request": request})

        return Response(serializer.data)
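
Given the fields this view reads from JobInputSerializer ("data", "commit", and an optional "schedule"), a request body might look roughly like the sketch below; the keys inside "data" are placeholders, and the schedule format is only hinted at by Example #10:

# Hedged sketch of a POST body for this endpoint; field names come from the
# serializer accesses above, values are illustrative only.
payload = {
    "data": {"device_name": "edge-router-01"},  # job-specific form data (assumed keys)
    "commit": True,   # None falls back to Meta.commit_default
    "schedule": None, # leave empty to enqueue the job immediately
}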
Example #7
    def run(self, request, class_path):
        if not request.user.has_perm("extras.run_job"):
            raise PermissionDenied(
                "This user does not have permission to run jobs.")

        # Check that at least one RQ worker is running
        if not Worker.count(get_connection("default")):
            raise RQWorkerNotRunningException()

        job_class = self._get_job_class(class_path)
        job = job_class()
        input_serializer = serializers.JobInputSerializer(data=request.data)

        if not input_serializer.is_valid():
            return Response(input_serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)

        data = input_serializer.data["data"]
        commit = input_serializer.data["commit"]
        if commit is None:
            commit = getattr(job_class.Meta, "commit_default", True)

        job_content_type = ContentType.objects.get(app_label="extras",
                                                   model="job")

        job_result = JobResult.enqueue_job(
            run_job,
            job.class_path,
            job_content_type,
            request.user,
            data=data,
            request=copy_safe_request(request),
            commit=commit,
        )
        job.result = job_result

        serializer = serializers.JobDetailSerializer(
            job, context={"request": request})

        return Response(serializer.data)
Example #8
    def dry_run(self, request, pk):
        scheduled_job = get_object_or_404(ScheduledJob, pk=pk)
        job_model = scheduled_job.job_model
        if job_model is None or not job_model.runnable:
            raise MethodNotAllowed("This job cannot be dry-run at this time.")
        if not Job.objects.check_perms(request.user, instance=job_model, action="run"):
            raise PermissionDenied("You do not have permission to run this job.")

        # Immediately enqueue the job with commit=False
        job_content_type = get_job_content_type()
        job_result = JobResult.enqueue_job(
            run_job,
            job_model.class_path,
            job_content_type,
            request.user,
            data=scheduled_job.kwargs.get("data", {}),
            request=copy_safe_request(request),
            commit=False,  # force a dry-run
        )
        serializer = serializers.JobResultSerializer(job_result, context={"request": request})

        return Response(serializer.data)
Example #9
    def handle(self, *args, **options):
        if "/" not in options["job"]:
            raise CommandError(
                'Job must be specified in the form "grouping_name/module_name/JobClassName"'
            )
        job_class = get_job(options["job"])
        if not job_class:
            raise CommandError('Job "%s" not found' % options["job"])

        job_content_type = ContentType.objects.get(app_label="extras",
                                                   model="job")

        # Run the job and create a new JobResult
        self.stdout.write("[{:%H:%M:%S}] Running {}...".format(
            timezone.now(), job_class.class_path))

        job_result = JobResult.enqueue_job(
            run_job,
            job_class.class_path,
            job_content_type,
            None,
            data={},  # TODO: parsing CLI args into a data dictionary is not currently implemented
            request=None,
            commit=options["commit"],
        )

        # Wait on the job to finish
        while job_result.status not in JobResultStatusChoices.TERMINAL_STATE_CHOICES:
            time.sleep(1)
            job_result = JobResult.objects.get(pk=job_result.pk)

        # Report on success/failure
        for test_name, attrs in job_result.data.items():

            if test_name in ["total", "output"]:
                continue

            self.stdout.write(
                "\t{}: {} success, {} info, {} warning, {} failure".format(
                    test_name,
                    attrs["success"],
                    attrs["info"],
                    attrs["warning"],
                    attrs["failure"],
                ))

            for log_entry in attrs["log"]:
                status = log_entry[1]
                if status == "success":
                    status = self.style.SUCCESS(status)
                elif status == "info":
                    status = status
                elif status == "warning":
                    status = self.style.WARNING(status)
                elif status == "failure":
                    status = self.style.NOTICE(status)

                if log_entry[2]:  # object associated with log entry
                    self.stdout.write(
                        f"\t\t{status}: {log_entry[2]}: {log_entry[-1]}")
                else:
                    self.stdout.write(f"\t\t{status}: {log_entry[-1]}")

        if job_result.data["output"]:
            self.stdout.write(job_result.data["output"])

        if job_result.status == JobResultStatusChoices.STATUS_FAILED:
            status = self.style.ERROR("FAILED")
        elif job_result.status == JobResultStatusChoices.STATUS_ERRORED:
            status = self.style.ERROR("ERRORED")
        else:
            status = self.style.SUCCESS("SUCCESS")
        self.stdout.write("[{:%H:%M:%S}] {}: {}".format(
            timezone.now(), job_class.class_path, status))

        # Wrap things up
        self.stdout.write("[{:%H:%M:%S}] {}: Duration {}".format(
            timezone.now(), job_class.class_path, job_result.duration))
        self.stdout.write("[{:%H:%M:%S}] Finished".format(timezone.now()))
Example #10
def _run_job(request, job_model, legacy_response=False):
    """An internal function providing logic shared between JobModelViewSet.run() and JobViewSet.run()."""
    if not request.user.has_perm("extras.run_job"):
        raise PermissionDenied("This user does not have permission to run jobs.")
    if not job_model.enabled:
        raise PermissionDenied("This job is not enabled to be run.")
    if not job_model.installed:
        raise MethodNotAllowed(request.method, detail="This job is not presently installed and cannot be run")

    job_class = job_model.job_class
    if job_class is None:
        raise MethodNotAllowed(request.method, detail="This job's source code could not be located and cannot be run")
    job = job_class()

    input_serializer = serializers.JobInputSerializer(data=request.data)
    input_serializer.is_valid(raise_exception=True)

    data = input_serializer.data["data"] or {}
    commit = input_serializer.data["commit"]
    if commit is None:
        commit = job_model.commit_default

    try:
        job.validate_data(data)
    except FormsValidationError as e:
        # message_dict is only available when the ValidationError was
        # constructed with a dict (stored as error_dict); otherwise the
        # errors are a plain list under messages.
        return Response({"errors": e.message_dict if hasattr(e, "error_dict") else e.messages}, status=400)

    if not get_worker_count():
        raise CeleryWorkerNotRunningException()

    job_content_type = get_job_content_type()
    schedule_data = input_serializer.data.get("schedule")

    # Default to a null JobResult.
    job_result = None

    # Assert that a job with `approval_required=True` has a schedule that enforces approval and
    # executes immediately.
    if schedule_data is None and job_model.approval_required:
        schedule_data = {"interval": JobExecutionType.TYPE_IMMEDIATELY}

    # Try to create a ScheduledJob, or...
    if schedule_data:
        schedule = _create_schedule(schedule_data, data, commit, job, job_model, request)
    else:
        schedule = None

    # ... If we can't create one, create a JobResult instead.
    if schedule is None:
        job_result = JobResult.enqueue_job(
            run_job,
            job.class_path,
            job_content_type,
            request.user,
            data=data,
            request=copy_safe_request(request),
            commit=commit,
        )
        job.result = job_result

    if legacy_response:
        # Old-style JobViewSet response - serialize the Job class in the response for some reason?
        serializer = serializers.JobClassDetailSerializer(job, context={"request": request})
        return Response(serializer.data)
    else:
        # New-style JobModelViewSet response - serialize the schedule or job_result as appropriate
        data = {"schedule": None, "job_result": None}
        if schedule:
            data["schedule"] = nested_serializers.NestedScheduledJobSerializer(
                schedule, context={"request": request}
            ).data
        if job_result:
            data["job_result"] = nested_serializers.NestedJobResultSerializer(
                job_result, context={"request": request}
            ).data
        return Response(data, status=status.HTTP_201_CREATED)
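
For the new-style branch above, the 201 response is a two-key dictionary in which exactly one of the nested serializations is populated; a hedged illustration of its shape when the job was enqueued immediately (field contents abbreviated and illustrative only):

# Approximate shape of the JobModelViewSet-style response when no ScheduledJob
# was created; the nested job_result fields shown here are placeholders.
response_body = {
    "schedule": None,
    "job_result": {"id": "<uuid>", "status": "pending"},
}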
Example #11
def job_runner(handle_class, job_class, device=None, user=None):
    """Function to make management command code more DRY."""
    data = {}

    if device:
        data["device"] = Device.objects.get(name=device)

    request = RequestFactory().request(SERVER_NAME="WebRequestContext")
    request.id = uuid.uuid4()
    request.user = User.objects.get(username=user)

    job_content_type = ContentType.objects.get(app_label="extras", model="job")

    # Run the job and create a new JobResult
    handle_class.stdout.write("[{:%H:%M:%S}] Running {}...".format(
        timezone.now(), job_class.class_path))

    job_result = JobResult.enqueue_job(
        run_job,
        job_class.class_path,
        job_content_type,
        request.user,
        data=data,
        request=request,
        commit=True,
    )

    # Wait on the job to finish
    while job_result.status not in JobResultStatusChoices.TERMINAL_STATE_CHOICES:
        time.sleep(1)
        job_result = JobResult.objects.get(pk=job_result.pk)

    # Report on success/failure
    for test_name, attrs in job_result.data.items():

        if test_name in ["total", "output"]:
            continue

        handle_class.stdout.write(
            "\t{}: {} success, {} info, {} warning, {} failure".format(
                test_name,
                attrs["success"],
                attrs["info"],
                attrs["warning"],
                attrs["failure"],
            ))

        for log_entry in attrs["log"]:
            status = log_entry[1]
            if status == "success":
                status = handle_class.style.SUCCESS(status)
            elif status == "info":
                status = status  # pylint: disable=self-assigning-variable
            elif status == "warning":
                status = handle_class.style.WARNING(status)
            elif status == "failure":
                status = handle_class.style.NOTICE(status)

            if log_entry[2]:  # object associated with log entry
                handle_class.stdout.write(
                    f"\t\t{status}: {log_entry[2]}: {log_entry[-1]}")
            else:
                handle_class.stdout.write(f"\t\t{status}: {log_entry[-1]}")

    if job_result.data["output"]:
        handle_class.stdout.write(job_result.data["output"])

    if job_result.status == JobResultStatusChoices.STATUS_FAILED:
        status = handle_class.style.ERROR("FAILED")
    elif job_result.status == JobResultStatusChoices.STATUS_ERRORED:
        status = handle_class.style.ERROR("ERRORED")
    else:
        status = handle_class.style.SUCCESS("SUCCESS")
    handle_class.stdout.write("[{:%H:%M:%S}] {}: {}".format(
        timezone.now(), job_class.class_path, status))

    # Wrap things up
    handle_class.stdout.write("[{:%H:%M:%S}] {}: Duration {}".format(
        timezone.now(), job_class.class_path, job_result.duration))
    handle_class.stdout.write("[{:%H:%M:%S}] Finished".format(timezone.now()))
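
A hedged sketch of how a management command could lean on job_runner to stay DRY, as its docstring suggests; the command class, option names, and job class path below are placeholders, and the import paths are assumptions:

from django.core.management.base import BaseCommand
from nautobot.extras.jobs import get_job  # import path assumed


class Command(BaseCommand):
    """Hypothetical management command built on job_runner."""

    def handle(self, *args, **options):
        # Placeholder class_path and options; job_runner expects the command
        # instance itself for stdout/style access.
        job_class = get_job("local/example_jobs/VerifyDevice")
        job_runner(self, job_class, device=options.get("device"), user=options.get("username"))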
Example #12
    def handle(self, *args, **options):
        if "/" not in options["job"]:
            raise CommandError(
                'Job must be specified in the form "grouping_name/module_name/JobClassName"'
            )
        job_class = get_job(options["job"])
        if not job_class:
            raise CommandError('Job "%s" not found' % options["job"])

        user = None
        request = None
        if options["commit"] and not options["username"]:
            # Job execution with commit=True uses change_logging(), which requires a user as the author of any changes
            raise CommandError("--username is mandatory when --commit is used")

        if options["username"]:
            User = get_user_model()
            try:
                user = User.objects.get(username=options["username"])
            except User.DoesNotExist as exc:
                raise CommandError("No such user") from exc

            request = RequestFactory().request(
                SERVER_NAME="nautobot_server_runjob")
            request.id = uuid.uuid4()
            request.user = user

        job_content_type = get_job_content_type()

        # Run the job and create a new JobResult
        self.stdout.write("[{:%H:%M:%S}] Running {}...".format(
            timezone.now(), job_class.class_path))

        job_result = JobResult.enqueue_job(
            run_job,
            job_class.class_path,
            job_content_type,
            user,
            data={},  # TODO: parsing CLI args into a data dictionary is not currently implemented
            request=copy_safe_request(request) if request else None,
            commit=options["commit"],
        )

        # Wait on the job to finish
        while job_result.status not in JobResultStatusChoices.TERMINAL_STATE_CHOICES:
            time.sleep(1)
            job_result = JobResult.objects.get(pk=job_result.pk)

        # Report on success/failure
        groups = set(
            JobLogEntry.objects.filter(job_result=job_result).values_list(
                "grouping", flat=True))
        for group in sorted(groups):
            logs = JobLogEntry.objects.filter(job_result__pk=job_result.pk,
                                              grouping=group)
            success_count = logs.filter(
                log_level=LogLevelChoices.LOG_SUCCESS).count()
            info_count = logs.filter(
                log_level=LogLevelChoices.LOG_INFO).count()
            warning_count = logs.filter(
                log_level=LogLevelChoices.LOG_WARNING).count()
            failure_count = logs.filter(
                log_level=LogLevelChoices.LOG_FAILURE).count()

            self.stdout.write(
                "\t{}: {} success, {} info, {} warning, {} failure".format(
                    group,
                    success_count,
                    info_count,
                    warning_count,
                    failure_count,
                ))

            for log_entry in logs:
                status = log_entry.log_level
                if status == "success":
                    status = self.style.SUCCESS(status)
                elif status == "info":
                    status = status
                elif status == "warning":
                    status = self.style.WARNING(status)
                elif status == "failure":
                    status = self.style.NOTICE(status)

                if log_entry.log_object:
                    self.stdout.write(
                        f"\t\t{status}: {log_entry.log_object}: {log_entry.message}"
                    )
                else:
                    self.stdout.write(f"\t\t{status}: {log_entry.message}")

        if job_result.data["output"]:
            self.stdout.write(job_result.data["output"])

        if job_result.status == JobResultStatusChoices.STATUS_FAILED:
            status = self.style.ERROR("FAILED")
        elif job_result.status == JobResultStatusChoices.STATUS_ERRORED:
            status = self.style.ERROR("ERRORED")
        else:
            status = self.style.SUCCESS("SUCCESS")
        self.stdout.write("[{:%H:%M:%S}] {}: {}".format(
            timezone.now(), job_class.class_path, status))

        # Wrap things up
        self.stdout.write("[{:%H:%M:%S}] {}: Duration {}".format(
            timezone.now(), job_class.class_path, job_result.duration))
        self.stdout.write("[{:%H:%M:%S}] Finished".format(timezone.now()))
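
Judging by the options parsed above (a positional "job" in grouping/module/class form plus --commit and --username) and the SERVER_NAME "nautobot_server_runjob" used for the synthetic request, this handler likely backs a runjob management command. A hedged way to invoke it programmatically; the command name is inferred rather than confirmed by the excerpt, and the job path and username are placeholders:

from django.core.management import call_command

# "runjob" is inferred from the SERVER_NAME above; keyword option names mirror
# the options["commit"] / options["username"] lookups in the handler.
call_command("runjob", "local/example_jobs/VerifyDevice", commit=True, username="admin")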