Example #1
    def logTaskCreatedSQL(self):
        """Log task creation times for this group of tasks."""
        with self.groupTasksLock:
            self.finalised = True

            def insertTasks():
                with transaction.atomic():
                    for task in self.groupTasks:
                        self._log_task(self.linkTaskManager,
                                       task.commandReplacementDic, task.UUID,
                                       task.arguments)

            databaseFunctions.retryOnFailure("Insert tasks", insertTasks)
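
All of the examples funnel their database writes through retryOnFailure (exposed here as databaseFunctions.retryOnFailure). The snippet below is only a minimal sketch of how such a helper could look, assuming it retries transient database errors with a short randomized pause; the retry count, the OperationalError filter and the logger name are assumptions, not the actual implementation.

import logging
import random
import time

from django.db import OperationalError

logger = logging.getLogger(__name__)


def retryOnFailure(description, callback, retries=10):
    """Run ``callback``, retrying on transient database errors (sketch only)."""
    for attempt in range(retries + 1):
        try:
            callback()
            return
        except OperationalError as err:
            if attempt == retries:
                logger.error('"%s" failed after %s retries', description, retries)
                raise
            logger.debug('Retrying "%s" after error: %s', description, err)
            # Small random back-off before trying the callback again.
            time.sleep(random.uniform(0, 2))
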
Example #2
def execute_command(supported_modules, gearman_worker, gearman_job):
    """Execute the command encoded in ``gearman_job`` and return its exit code,
    standard output and standard error as a pickled dict.
    """
    logger.info("\n\n*** RUNNING TASK: %s", gearman_job.task)

    try:
        jobs = handle_batch_task(gearman_job, supported_modules)
        results = {}

        def write_task_results_callback():
            with transaction.atomic():
                for job in jobs:
                    logger.info("\n\n*** Completed job: %s", job.dump())

                    kwargs = {
                        "exitcode": job.get_exit_code(),
                        "endtime": getUTCDate()
                    }
                    if (django_settings.CAPTURE_CLIENT_SCRIPT_OUTPUT
                            or kwargs["exitcode"] > 0):
                        kwargs.update({
                            "stdout": job.get_stdout(),
                            "stderror": job.get_stderr()
                        })
                    Task.objects.filter(taskuuid=job.UUID).update(**kwargs)

                    results[job.UUID] = {"exitCode": job.get_exit_code()}

                    if job.caller_wants_output:
                        # Send back stdout/stderr so it can be written to files.
                        # Most cases don't require this (logging to the database is
                        # enough), but the ones that do are coordinated through the
                        # MCP Server so that multiple MCP Client instances don't try
                        # to write the same file at the same time.
                        results[job.UUID]["stdout"] = job.get_stdout()
                        results[job.UUID]["stderror"] = job.get_stderr()

        retryOnFailure("Write task results", write_task_results_callback)

        return cPickle.dumps({"task_results": results})
    except SystemExit:
        logger.error(
            "IMPORTANT: Task %s attempted to call exit()/quit()/sys.exit(). This module should be fixed!",
            gearman_job.task,
        )
        return fail_all_tasks(gearman_job, "Module attempted exit")
    except Exception as e:
        logger.exception("Exception while processing task %s: %s",
                         gearman_job.task, e)
        return fail_all_tasks(gearman_job, e)
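
For context, the pickled payload returned by execute_command can be read back with the same cPickle module on the submitting side. The fragment below is a hypothetical consumer, shown only to illustrate the shape of the "task_results" dict; the function and variable names are illustrative, not part of the snippets above.

import cPickle


def read_task_results(job_result_data):
    """Unpickle a worker reply and report per-task exit codes (sketch)."""
    results = cPickle.loads(job_result_data)["task_results"]
    for task_uuid, result in results.items():
        print("Task %s finished with exit code %s" % (task_uuid, result["exitCode"]))
        # stdout/stderror are only present when the caller asked for output.
        if "stdout" in result:
            print(result["stdout"])
    return results
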
Example #3
def handle_batch_task(gearman_job, supported_modules):
    module_name = supported_modules.get(gearman_job.task)
    gearman_data = cPickle.loads(gearman_job.data)

    utc_date = getUTCDate()
    jobs = []
    for task_uuid in gearman_data["tasks"]:
        task_data = gearman_data["tasks"][task_uuid]
        arguments = task_data["arguments"]
        if isinstance(arguments, six.text_type):
            arguments = arguments.encode("utf-8")

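        # NOTE: ``replacement_dict`` below is not defined in this snippet; it is
        # assumed to be a module-level mapping of standard placeholder replacements.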
        replacements = (replacement_dict.items() + {
            "%date%": utc_date.isoformat(),
            "%taskUUID%": task_uuid,
            "%jobCreatedDate%": task_data["createdDate"],
        }.items())

        for var, val in replacements:
            arguments = arguments.replace(var, val)

        job = Job(
            gearman_job.task,
            task_data["uuid"],
            _parse_command_line(arguments),
            caller_wants_output=task_data["wants_output"],
        )
        jobs.append(job)

    # Set their start times.  If we collide with the MCP Server inserting new
    # Tasks (which can happen under heavy concurrent load), retry as needed.
    def set_start_times():
        Task.objects.filter(taskuuid__in=[item.UUID for item in jobs]).update(
            starttime=utc_date)

    retryOnFailure("Set task start times", set_start_times)

    module = importlib.import_module("clientScripts." + module_name)

    # Our module can indicate that it should be run concurrently...
    if hasattr(module, "concurrent_instances"):
        fork_runner.call(
            "clientScripts." + module_name,
            jobs,
            task_count=module.concurrent_instances(),
        )
    else:
        module.call(jobs)

    return jobs
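
handle_batch_task only requires that a client script module expose a call(jobs) function, plus an optional concurrent_instances() hook if it wants to be run through fork_runner. A minimal module honouring that contract might look like the sketch below; the Job methods it uses (JobContext, set_status, print_error, the args attribute) and the do_work helper are assumptions about the client-side API, not taken from these examples.

# clientScripts/example_client_script.py -- hypothetical module name


def concurrent_instances():
    # Optional hook: when present, the dispatcher runs this module through
    # fork_runner.call() with this many parallel workers.
    return 4


def do_work(args):
    # Placeholder for the real per-task work; ``args`` stands in for the parsed
    # command line produced by _parse_command_line() in handle_batch_task().
    pass


def call(jobs):
    for job in jobs:
        with job.JobContext():             # assumed Job context manager
            try:
                do_work(job.args)          # assumed ``args`` attribute on Job
                job.set_status(0)          # assumed: record exit code 0
            except Exception as err:
                job.print_error(str(err))  # assumed error-output helper
                job.set_status(1)
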
Example #4
        def fail_all_tasks_callback():
            for task_uuid in gearman_data["tasks"]:
                Task.objects.filter(taskuuid=task_uuid).update(
                    stderror=str(reason), exitcode=1, endtime=getUTCDate())

            retryOnFailure("Fail all tasks", fail_all_tasks_callback)