Example #1
class ForwardModel(BaseCClass):
    TYPE_NAME = "forward_model"
    _free = QueuePrototype("void forward_model_free( forward_model )")
    _clear = QueuePrototype("void forward_model_clear(forward_model)")
    _add_job = QueuePrototype(
        "ext_job_ref forward_model_add_job(forward_model, char*)")
    _alloc_joblist = QueuePrototype(
        "stringlist_obj forward_model_alloc_joblist(forward_model)")
    _iget_job = QueuePrototype(
        "ext_job_ref forward_model_iget_job( forward_model, int)")

    def __init__(self):
        raise NotImplementedError("Class can not be instantiated directly!")

    def joblist(self):
        """ @rtype: StringList """
        return self._alloc_joblist()

    def iget_job(self, index):
        """ @rtype: ExtJob """
        return self._iget_job(index).setParent(self)

    def add_job(self, name):
        """ @rtype: ExtJob """
        return self._add_job(name).setParent(self)

    def clear(self):
        self._clear()

    def free(self):
        self._free()
Example #2
class ExtJoblist(BaseCClass):
    TYPE_NAME = "ext_joblist"
    _free = QueuePrototype("void ext_joblist_free( ext_joblist )")
    _alloc_list = QueuePrototype(
        "stringlist_ref ext_joblist_alloc_list(ext_joblist)")
    _get_job = QueuePrototype(
        "ext_job_ref ext_joblist_get_job(ext_joblist, char*)")
    _del_job = QueuePrototype("int ext_joblist_del_job(ext_joblist, char*)")
    _has_job = QueuePrototype("int ext_joblist_has_job(ext_joblist, char*)")
    _add_job = QueuePrototype(
        "void ext_joblist_add_job(ext_joblist, char*, ext_job)")
    _get_jobs = QueuePrototype("hash_ref ext_joblist_get_jobs(ext_joblist)")

    def __init__(self):
        raise NotImplementedError("Class can not be instantiated directly!")

    def get_jobs(self):
        """ @rtype: Hash """
        jobs = self._get_jobs()
        jobs.setParent(self)
        return jobs

    def getAvailableJobNames(self):
        """ @rtype: StringList """
        return self._alloc_list().setParent(self)

    def del_job(self, job):
        return self._del_job(job)

    def has_job(self, job):
        return self._has_job(job)

    def get_job(self, job):
        """ @rtype: ExtJob """
        return self._get_job(job).setParent(self)

    def add_job(self, job_name, new_job):
        self._add_job(job_name, new_job)

    def free(self):
        self._free()
Example #3
class ForwardModel(BaseCClass):
    TYPE_NAME = "forward_model"
    _alloc = QueuePrototype("void* forward_model_alloc(ext_joblist)",
                            bind=False)
    _free = QueuePrototype("void forward_model_free( forward_model )")
    _clear = QueuePrototype("void forward_model_clear(forward_model)")
    _add_job = QueuePrototype(
        "ext_job_ref forward_model_add_job(forward_model, char*)")
    _alloc_joblist = QueuePrototype(
        "stringlist_obj forward_model_alloc_joblist(forward_model)")
    _iget_job = QueuePrototype(
        "ext_job_ref forward_model_iget_job( forward_model, int)")
    _formatted_fprintf = QueuePrototype(
        "void forward_model_formatted_fprintf(forward_model, char*, subst_list, int)"
    )

    def __init__(self, ext_joblist):
        c_ptr = self._alloc(ext_joblist)
        super(ForwardModel, self).__init__(c_ptr)

    def joblist(self):
        """ @rtype: StringList """
        return self._alloc_joblist()

    def iget_job(self, index):
        """ @rtype: ExtJob """
        return self._iget_job(index).setParent(self)

    def add_job(self, name):
        """ @rtype: ExtJob """
        return self._add_job(name).setParent(self)

    def clear(self):
        self._clear()

    def free(self):
        self._free()

    def formatted_fprintf(self, path, global_args, unmask):
        self._formatted_fprintf(path, global_args, unmask)
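
A hedged usage sketch for the constructible ForwardModel above. The import path, the job name "ECHO" and the config file path are assumptions; ExtJob and ExtJoblist are the classes shown in the later examples.

# Sketch only: import path, job name and config file path are assumed.
from ert.job_queue import ExtJob, ExtJoblist, ForwardModel

joblist = ExtJoblist()                                           # constructible variant, see Example #10
joblist.add_job("ECHO", ExtJob("/path/to/ECHO", private=False))  # hypothetical job config file

model = ForwardModel(joblist)
job = model.add_job("ECHO")               # ExtJob reference, reparented to the model
for name in model.joblist():              # StringList with the job names
    print(name)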
Example #4
class Workflow(BaseCClass):
    TYPE_NAME = "workflow"
    _alloc = QueuePrototype("void* workflow_alloc(char*, workflow_joblist)",
                            bind=False)
    _free = QueuePrototype("void     workflow_free(workflow)")
    _count = QueuePrototype("int      workflow_size(workflow)")
    _iget_job = QueuePrototype(
        "workflow_job_ref workflow_iget_job(workflow, int)")
    _iget_args = QueuePrototype(
        "stringlist_ref   workflow_iget_arguments(workflow, int)")

    _try_compile = QueuePrototype(
        "bool workflow_try_compile(workflow, subst_list)")
    _get_last_error = QueuePrototype(
        "config_error_ref workflow_get_last_error(workflow)")

    def __init__(self, src_file, job_list):
        """
        @type src_file: str
        @type job_list: WorkflowJoblist
        """
        c_ptr = self._alloc(src_file, job_list)
        super(Workflow, self).__init__(c_ptr)

        self.__running = False
        self.__cancelled = False
        self.__current_job = None

    def __len__(self):
        return self._count()

    def __getitem__(self, index):
        """
        @type index: int
        @rtype: tuple of (WorkflowJob, arguments)
        """
        job = self._iget_job(index)
        args = self._iget_args(index)
        return job, args

    def __iter__(self):
        for index in range(len(self)):
            yield self[index]

    def run(self, ert, verbose=False, context=None):
        """
        @type ert: ert.enkf.enkf_main.EnKFMain
        @type verbose: bool
        @type context: SubstitutionList
        @rtype: bool
        """
        self.__running = True
        success = self._try_compile(context)

        if success:
            for job, args in self:
                self.__current_job = job
                if not self.__cancelled:
                    return_value = job.run(ert, args, verbose)

                    if job.hasFailed():
                        print(return_value)

                    # TODO: store results?

        self.__current_job = None
        self.__running = False
        return success

    def free(self):
        self._free()

    def isRunning(self):
        return self.__running

    def cancel(self):
        if self.__current_job is not None:
            self.__current_job.cancel()

        self.__cancelled = True

    def isCancelled(self):
        return self.__cancelled

    def wait(self):
        while self.isRunning():
            time.sleep(1)

    def getLastError(self):
        """ @rtype: ConfigError """
        return self._get_last_error()

    @classmethod
    def createCReference(cls, c_pointer, parent=None):
        workflow = super(Workflow, cls).createCReference(c_pointer, parent)
        workflow.__running = False
        workflow.__cancelled = False
        workflow.__current_job = None
        return workflow
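
A hedged sketch of building and running a Workflow as the run() docstring describes. The import paths, the file paths and the EnKFMain construction are assumptions.

# Sketch only: import paths, file paths and EnKFMain usage are assumed.
from ert.enkf import EnKFMain
from ert.job_queue import Workflow, WorkflowJoblist
from ert.util import SubstitutionList

job_list = WorkflowJoblist()                             # see Example #11
job_list.addJobFromFile("MY_JOB", "/path/to/MY_JOB")     # hypothetical job definition file

workflow = Workflow("/path/to/workflow_file", job_list)  # hypothetical workflow file
ert = EnKFMain("/path/to/config.ert")                    # hypothetical ert configuration
success = workflow.run(ert, verbose=True, context=SubstitutionList())
if not success:
    print(workflow.getLastError())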
Example #5
class WorkflowJob(BaseCClass):
    TYPE_NAME = "workflow_job"
    _alloc = QueuePrototype("void* workflow_job_alloc(char*, bool)",
                            bind=False)
    _alloc_parser = QueuePrototype(
        "config_parser_obj workflow_job_alloc_config( )", bind=False)
    _alloc_from_file = QueuePrototype(
        "workflow_job_obj workflow_job_config_alloc( char* , config_parser , char*)",
        bind=False)
    _free = QueuePrototype("void     workflow_job_free(workflow_job)")
    _name = QueuePrototype("char*    workflow_job_get_name(workflow_job)")
    _internal = QueuePrototype("bool     workflow_job_internal(workflow_job)")
    _is_internal_script = QueuePrototype(
        "bool   workflow_job_is_internal_script(workflow_job)")
    _get_internal_script = QueuePrototype(
        "char*  workflow_job_get_internal_script_path(workflow_job)")
    _get_function = QueuePrototype(
        "char*  workflow_job_get_function(workflow_job)")
    _get_module = QueuePrototype(
        "char*  workflow_job_get_module(workflow_job)")
    _get_executable = QueuePrototype(
        "char*  workflow_job_get_executable(workflow_job)")
    _min_arg = QueuePrototype("int  workflow_job_get_min_arg(workflow_job)")
    _max_arg = QueuePrototype("int  workflow_job_get_max_arg(workflow_job)")
    _arg_type = QueuePrototype(
        "config_content_type_enum workflow_job_iget_argtype(workflow_job, int)"
    )

    @classmethod
    def configParser(cls):
        return cls._alloc_parser()

    @classmethod
    def fromFile(cls, config_file, name=None, parser=None):
        if os.path.isfile(config_file) and os.access(config_file, os.R_OK):
            if parser is None:
                parser = cls.configParser()

            if name is None:
                name = os.path.basename(config_file)

            # NB: Observe argument reordering.
            return cls._alloc_from_file(name, parser, config_file)
        else:
            raise IOError("Could not open config_file:%s" % config_file)

    def __init__(self, name, internal=True):
        c_ptr = self._alloc(name, internal)
        super(WorkflowJob, self).__init__(c_ptr)

        self.__script = None
        """ :type: ErtScript """
        self.__running = False

    def isInternal(self):
        """ @rtype: bool """
        return self._internal()

    def name(self):
        """ @rtype: str """
        return self._name()

    def minimumArgumentCount(self):
        """ @rtype: int """
        return self._min_arg()

    def maximumArgumentCount(self):
        """ @rtype: int """
        return self._max_arg()

    def functionName(self):
        """ @rtype: str """
        return self._get_function()

    def module(self):
        """ @rtype: str """
        return self._get_module()

    def executable(self):
        """ @rtype: str """
        return self._get_executable()

    def isInternalScript(self):
        """ @rtype: bool """
        return self._is_internal_script()

    def getInternalScriptPath(self):
        """ @rtype: str """
        return self._get_internal_script()

    def isPlugin(self):
        """ @rtype: bool """
        if self.isInternalScript():
            script_obj = ErtScript.loadScriptFromFile(
                self.getInternalScriptPath())
            return script_obj is not None and issubclass(script_obj, ErtPlugin)

        return False

    def argumentTypes(self):
        """ @rtype: list of type """

        result = []
        for index in range(self.maximumArgumentCount()):
            t = self._arg_type(index)
            if t == ContentTypeEnum.CONFIG_BOOL:
                result.append(bool)
            elif t == ContentTypeEnum.CONFIG_FLOAT:
                result.append(float)
            elif t == ContentTypeEnum.CONFIG_INT:
                result.append(int)
            elif t == ContentTypeEnum.CONFIG_STRING:
                result.append(str)
            else:
                result.append(None)

        return result

    def run(self, ert, arguments, verbose=False):
        """
        @type ert: ert.enkf.enkf_main.EnKFMain
        @type arguments: list of str
        @type verbose: bool
        @rtype: ctypes.c_void_p
        """
        self.__running = True

        min_arg = self.minimumArgumentCount()
        if min_arg > 0 and len(arguments) < min_arg:
            raise UserWarning(
                "The job: %s requires at least %d arguments, %d given." %
                (self.name(), min_arg, len(arguments)))

        max_arg = self.maximumArgumentCount()
        if 0 < max_arg < len(arguments):
            raise UserWarning(
                "The job: %s can only have %d arguments, %d given." %
                (self.name(), max_arg, len(arguments)))

        if self.isInternalScript():
            script_obj = ErtScript.loadScriptFromFile(
                self.getInternalScriptPath())
            self.__script = script_obj(ert)
            result = self.__script.initializeAndRun(self.argumentTypes(),
                                                    arguments,
                                                    verbose=verbose)

        elif self.isInternal() and not self.isInternalScript():
            self.__script = FunctionErtScript(ert,
                                              self.functionName(),
                                              self.argumentTypes(),
                                              argument_count=len(arguments))
            result = self.__script.initializeAndRun(self.argumentTypes(),
                                                    arguments,
                                                    verbose=verbose)

        elif not self.isInternal():
            self.__script = ExternalErtScript(ert, self.executable())
            result = self.__script.initializeAndRun(self.argumentTypes(),
                                                    arguments,
                                                    verbose=verbose)

        else:
            raise UserWarning("Unknown script type!")

        self.__running = False
        return result

    def cancel(self):
        if self.__script is not None:
            self.__script.cancel()

    def isRunning(self):
        return self.__running

    def isCancelled(self):
        return self.__script.isCancelled()

    def hasFailed(self):
        """ @rtype: bool """
        return self.__script.hasFailed()

    def free(self):
        self._free()

    @classmethod
    def createCReference(cls, c_pointer, parent=None):
        workflow = super(WorkflowJob, cls).createCReference(c_pointer, parent)
        workflow.__script = None
        workflow.__running = False
        return workflow
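
A hedged sketch of loading WorkflowJob instances from configuration files, reusing a single parser as fromFile() permits. The file paths and job names are assumptions.

# Sketch only: config file paths and job names are assumed.
parser = WorkflowJob.configParser()
job_a = WorkflowJob.fromFile("/path/to/JOB_A", name="JOB_A", parser=parser)
job_b = WorkflowJob.fromFile("/path/to/JOB_B", parser=parser)   # name defaults to the file basename

print(job_a.name(), job_a.isInternal(), job_a.argumentTypes())
print(job_b.minimumArgumentCount(), job_b.maximumArgumentCount())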
Example #6
class JobQueue(BaseCClass):
    # If the queue is created with size == 0 that means that it will
    # just grow as needed; for the queue layer to know when to exit
    # you must call the function submit_complete() when you have no
    # more jobs to submit.
    #
    # If the number of jobs is known in advance you can create the
    # queue with a finite value for size, in that case it is not
    # necessary to explicitly inform the queue layer when all jobs have
    # been submitted.
    TYPE_NAME = "job_queue"
    _alloc = QueuePrototype(
        "void* job_queue_alloc( int , char* , char* , char* )", bind=False)
    _start_user_exit = QueuePrototype(
        "bool job_queue_start_user_exit( job_queue )")
    _get_user_exit = QueuePrototype(
        "bool job_queue_get_user_exit( job_queue )")
    _free = QueuePrototype("void job_queue_free( job_queue )")
    _set_max_running = QueuePrototype(
        "void job_queue_set_max_running( job_queue , int)")
    _get_max_running = QueuePrototype(
        "int  job_queue_get_max_running( job_queue )")
    _set_max_job_duration = QueuePrototype(
        "void job_queue_set_max_job_duration( job_queue , int)")
    _get_max_job_duration = QueuePrototype(
        "int  job_queue_get_max_job_duration( job_queue )")
    _set_driver = QueuePrototype(
        "void job_queue_set_driver( job_queue , void* )")
    _add_job = QueuePrototype(
        "int  job_queue_add_job( job_queue , char* , void* , void* , void* , void* , int , char* , char* , int , char**)"
    )
    _kill_job = QueuePrototype("bool job_queue_kill_job( job_queue , int )")
    _start_queue = QueuePrototype(
        "void job_queue_run_jobs( job_queue , int , bool)")
    _run_jobs = QueuePrototype(
        "void job_queue_run_jobs_threaded(job_queue , int , bool)")
    _sim_start = QueuePrototype(
        "time_t job_queue_iget_sim_start( job_queue , int)")
    _iget_driver_data = QueuePrototype(
        "void* job_queue_iget_driver_data( job_queue , int)")

    _num_running = QueuePrototype(
        "int  job_queue_get_num_running( job_queue )")
    _num_complete = QueuePrototype(
        "int  job_queue_get_num_complete( job_queue )")
    _num_waiting = QueuePrototype(
        "int  job_queue_get_num_waiting( job_queue )")
    _num_pending = QueuePrototype(
        "int  job_queue_get_num_pending( job_queue )")

    _is_running = QueuePrototype("bool job_queue_is_running( job_queue )")
    _submit_complete = QueuePrototype(
        "void job_queue_submit_complete( job_queue )")
    _iget_sim_start = QueuePrototype(
        "time_t job_queue_iget_sim_start( job_queue , int)")
    _get_active_size = QueuePrototype(
        "int  job_queue_get_active_size( job_queue )")
    _get_pause = QueuePrototype("bool job_queue_get_pause(job_queue)")
    _set_pause_on = QueuePrototype("void job_queue_set_pause_on(job_queue)")
    _set_pause_off = QueuePrototype("void job_queue_set_pause_off(job_queue)")

    # The return type of the job_queue_iget_job_status should really
    # be the enum job_status_type_enum, but I just did not manage to
    # get the prototyping right. Have therefore taken the return as an
    # integer and convert it in the getJobStatus() method.
    _get_job_status = QueuePrototype(
        "int job_queue_iget_job_status(job_queue, int)")

    def __init__(self, driver, max_submit=1, size=0):
        """
        Short doc...
        
        The @size argument is used to say how many jobs the queue will
        run, in total.
    
              size = 0: That means that you do not tell the queue in
                advance how many jobs you have. The queue will just run
                all the jobs you add, but you have to inform the queue in
                some way that all jobs have been submitted. To achieve
                this you should call the submit_complete() method when all
                jobs have been submitted.#
    
              size > 0: The queue will know exactly how many jobs to run,
                and will continue until this number of jobs have completed
                - it is not necessary to call the submit_complete() method
                in this case.
            """

        OK_file = None
        status_file = None
        exit_file = None

        c_ptr = self._alloc(max_submit, OK_file, status_file, exit_file)
        super(JobQueue, self).__init__(c_ptr)
        self.size = size

        self.driver = driver
        self._set_driver(driver.from_param(driver))
        self.start(blocking=False)

    def kill_job(self, queue_index):
        """
        Will kill job number @queue_index.
        """
        self._kill_job(queue_index)

    def start(self, blocking=False):
        verbose = False
        self._run_jobs(self.size, verbose)

    def submit(self, cmd, run_path, job_name, argv, num_cpu=1):
        c_argv = (ctypes.c_char_p * len(argv))()
        c_argv[:] = argv

        done_callback = None
        callback_arg = None
        retry_callback = None
        exit_callback = None

        queue_index = self._add_job(cmd, done_callback, retry_callback,
                                    exit_callback, callback_arg, num_cpu,
                                    run_path, job_name, len(argv), c_argv)

        return queue_index

    def clear(self):
        pass

    def block_waiting(self):
        """
        Will block as long as there are waiting jobs.
        """
        while self.num_waiting > 0:
            time.sleep(1)

    def block(self):
        """
        Will block as long as there are running jobs.
        """
        while self.isRunning():
            time.sleep(1)

    def submit_complete(self):
        """
        Method to inform the queue that all jobs have been submitted.

        If the queue has been created with size == 0 the queue has no
        way of knowing when all jobs have completed; hence in that
        case you must call the submit_complete() method when all jobs
        have been submitted.

        If you know in advance exactly how many jobs you will run that
        should be specified with the size argument when creating the
        queue, in that case it is not necessary to call the
        submit_complete() method.
        """
        self._submit_complete()

    def isRunning(self):
        return self._is_running()

    def num_running(self):
        return self._num_running()

    def num_pending(self):
        return self._num_pending()

    def num_waiting(self):
        return self._num_waiting()

    def num_complete(self):
        return self._num_complete()

    def exists(self, index):
        job = self.__getitem__(index)
        if job:
            return True
        else:
            return False

    def get_max_running(self):
        return self.driver.get_max_running()

    def set_max_running(self, max_running):
        self.driver.set_max_running(max_running)

    def get_max_job_duration(self):
        return self._get_max_job_duration()

    def set_max_job_duration(self, max_duration):
        self._set_max_job_duration(max_duration)

    def killAllJobs(self):
        # The queue will not set the user_exit flag before the
        # queue is in a running state. If the queue does not
        # change to running state within a timeout the C function
        # will return False, and that False value is just passed
        # along.
        user_exit = self._start_user_exit()
        if user_exit:
            while self.isRunning():
                time.sleep(0.1)
            return True
        else:
            return False

    def igetSimStart(self, job_index):
        return self._iget_sim_start(job_index)

    def getUserExit(self):
        # Will check if a user_exit has been initiated on the job. The
        # queue can be queried about this status until a
        # job_queue_reset() call is invoked, and that should not be
        # done before the queue is recycled to run another batch of
        # simulations.
        return self._get_user_exit()

    def set_pause_on(self):
        self._set_pause_on()

    def set_pause_off(self):
        self._set_pause_off()

    def free(self):
        self._free()

    def __len__(self):
        return self._get_active_size()

    def getJobStatus(self, job_number):
        # See comment about return type in the prototype section at
        # the top of class.
        """ @rtype: JobStatusType """
        int_status = self._get_job_status(job_number)
        return JobStatusType(int_status)
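
A hedged sketch of the size == 0 / submit_complete() pattern described in the comments at the top of the class. The import path, the enum name and the commands/paths are assumptions.

# Sketch only: import path, enum name, commands and paths are assumed.
from ert.job_queue import Driver, JobQueue, QueueDriverEnum

driver = Driver(QueueDriverEnum.LOCAL_DRIVER, max_running=4)   # see Example #9
queue = JobQueue(driver, max_submit=1, size=0)                 # size == 0: open-ended queue

for i in range(10):
    queue.submit("/bin/echo", "/tmp/run_%d" % i, "job_%d" % i, ["hello", str(i)])

queue.submit_complete()       # required because size == 0
queue.block()                 # poll until no jobs are running
print(queue.num_complete(), "jobs completed")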
Example #7
class JobQueueManager(BaseCClass):
    TYPE_NAME = "job_queue_manager"
    _alloc = QueuePrototype("void* job_queue_manager_alloc( job_queue)",
                            bind=False)
    _free = QueuePrototype("void job_queue_manager_free( job_queue_manager )")
    _start_queue = QueuePrototype(
        "void job_queue_manager_start_queue( job_queue_manager , int , bool, bool)"
    )
    _get_num_waiting = QueuePrototype(
        "int job_queue_manager_get_num_waiting( job_queue_manager )")
    _get_num_running = QueuePrototype(
        "int job_queue_manager_get_num_running( job_queue_manager )")
    _get_num_success = QueuePrototype(
        "int job_queue_manager_get_num_success( job_queue_manager )")
    _get_num_failed = QueuePrototype(
        "int job_queue_manager_get_num_failed( job_queue_manager )")
    _is_running = QueuePrototype(
        "bool job_queue_manager_is_running( job_queue_manager )")
    _job_complete = QueuePrototype(
        "bool job_queue_manager_job_complete( job_queue_manager , int)")
    _job_running = QueuePrototype(
        "bool job_queue_manager_job_running( job_queue_manager , int)")

    # Note: even if all realizations have finished, they need not all have failed or succeeded.
    # That is how Ert reports things: jobs can be "killed", which is neither success nor failure.
    _job_failed = QueuePrototype(
        "bool job_queue_manager_job_failed( job_queue_manager , int)")
    _job_waiting = QueuePrototype(
        "bool job_queue_manager_job_waiting( job_queue_manager , int)")
    _job_success = QueuePrototype(
        "bool job_queue_manager_job_success( job_queue_manager , int)")

    # The return type of the job_queue_manager_iget_job_status should
    # really be the enum job_status_type_enum, but I just did not
    # manage to get the prototyping right. Have therefore taken the
    # return as an integer and convert it in the getJobStatus()
    # method.
    _job_status = QueuePrototype(
        "int job_queue_manager_iget_job_status(job_queue_manager, int)")

    def __init__(self, queue):
        c_ptr = self._alloc(queue)
        super(JobQueueManager, self).__init__(c_ptr)

    def startQueue(self, total_size, verbose=False, reset_queue=True):
        self._start_queue(total_size, verbose, reset_queue)

    def getNumRunning(self):
        return self._get_num_running()

    def getNumWaiting(self):
        return self._get_num_waiting()

    def getNumSuccess(self):
        return self._get_num_success()

    def getNumFailed(self):
        return self._get_num_failed()

    def isRunning(self):
        return self._is_running()

    def free(self):
        self._free()

    def isJobComplete(self, job_index):
        return self._job_complete(job_index)

    def isJobRunning(self, job_index):
        return self._job_running(job_index)

    def isJobWaiting(self, job_index):
        return self._job_waiting(job_index)

    def didJobFail(self, job_index):
        return self._job_failed(job_index)

    def didJobSucceed(self, job_index):
        return self._job_success(job_index)

    def getJobStatus(self, job_index):
        # See comment about return type in the prototype section at
        # the top of class.
        """ @rtype: ert.job_queue.job_status_type_enum.JobStatusType """
        int_status = self._job_status(job_index)
        return JobStatusType(int_status)
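
A hedged sketch of monitoring a queue through JobQueueManager. The 'queue' object and the total size are assumptions; the queue is assumed to have been set up along the lines of Example #6.

# Sketch only: 'queue' is an assumed, already populated job_queue instance.
import time

manager = JobQueueManager(queue)
manager.startQueue(total_size=10, verbose=False)

while manager.isRunning():
    print("running: %d  waiting: %d" % (manager.getNumRunning(),
                                        manager.getNumWaiting()))
    time.sleep(1)

print("success: %d  failed: %d" % (manager.getNumSuccess(),
                                   manager.getNumFailed()))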
Example #8
class ExtJob(BaseCClass):
    TYPE_NAME = "ext_job"
    _alloc = QueuePrototype("void* ext_job_alloc(char*,char*, bool)",
                            bind=False)
    _fscanf_alloc = QueuePrototype(
        "void* ext_job_fscanf_alloc(char*, char*, bool, char* , bool)",
        bind=False)
    _free = QueuePrototype("void ext_job_free( ext_job )")
    _get_help_text = QueuePrototype("char* ext_job_get_help_text(ext_job)")
    _get_name = QueuePrototype("char* ext_job_get_name(ext_job)")
    _get_private_args_as_string = QueuePrototype(
        "char* ext_job_get_private_args_as_string(ext_job)")
    _set_private_args_as_string = QueuePrototype(
        "void ext_job_set_private_args_from_string(ext_job, char*)")
    _is_private = QueuePrototype("int ext_job_is_private(ext_job)")
    _get_config_file = QueuePrototype("char* ext_job_get_config_file(ext_job)")
    _set_config_file = QueuePrototype(
        "void ext_job_set_config_file(ext_job, char*)")
    _get_stdin_file = QueuePrototype("char* ext_job_get_stdin_file(ext_job)")
    _set_stdin_file = QueuePrototype(
        "void ext_job_set_stdin_file(ext_job, char*)")
    _get_stdout_file = QueuePrototype("char* ext_job_get_stdout_file(ext_job)")
    _set_stdout_file = QueuePrototype(
        "void ext_job_set_stdout_file(ext_job, char*)")
    _get_stderr_file = QueuePrototype("char* ext_job_get_stderr_file(ext_job)")
    _set_stderr_file = QueuePrototype(
        "void ext_job_set_stderr_file(ext_job, char*)")
    _get_target_file = QueuePrototype("char* ext_job_get_target_file(ext_job)")
    _set_target_file = QueuePrototype(
        "void ext_job_set_target_file(ext_job, char*)")
    _get_executable = QueuePrototype("char* ext_job_get_executable(ext_job)")
    _set_executable = QueuePrototype(
        "void ext_job_set_executable(ext_job, char*)")
    _get_error_file = QueuePrototype("char* ext_job_get_error_file(ext_job)")
    _get_start_file = QueuePrototype("char* ext_job_get_start_file(ext_job)")
    _get_max_running = QueuePrototype("int ext_job_get_max_running(ext_job)")
    _set_max_running = QueuePrototype(
        "void ext_job_set_max_running(ext_job, int)")
    _get_max_running_minutes = QueuePrototype(
        "int ext_job_get_max_running_minutes(ext_job)")
    _set_max_running_minutes = QueuePrototype(
        "void ext_job_set_max_running_minutes(ext_job, int)")
    _get_environment = QueuePrototype(
        "string_hash_ref ext_job_get_environment(ext_job)")
    _set_environment = QueuePrototype(
        "void ext_job_add_environment(ext_job, char*, char*)")
    _get_license_path = QueuePrototype(
        "char* ext_job_get_license_path(ext_job)")
    _get_arglist = QueuePrototype(
        "stringlist_ref ext_job_get_arglist(ext_job)")
    _clear_environment = QueuePrototype(
        "void ext_job_clear_environment(ext_job)")
    _save = QueuePrototype("void ext_job_save(ext_job)")

    def __init__(self,
                 config_file,
                 private,
                 name=None,
                 license_root_path=None,
                 search_PATH=True):
        if os.path.isfile(config_file):
            if name is None:
                name = os.path.basename(config_file)

            c_ptr = self._fscanf_alloc(name, license_root_path, private,
                                       config_file, search_PATH)
            super(ExtJob, self).__init__(c_ptr)
        else:
            raise IOError("No such file:%s" % config_file)

    def __str__(self):
        return "ExtJob(%s, config_file = %s)" % (self.name(),
                                                 self.get_config_file())

    def get_private_args_as_string(self):
        return self._get_private_args_as_string()

    def set_private_args_as_string(self, args):
        self._set_private_args_as_string(args)

    def get_help_text(self):
        return self._get_help_text()

    def is_private(self):
        return self._is_private()

    def get_config_file(self):
        return self._get_config_file()

    def set_config_file(self, config_file):
        self._set_config_file(config_file)

    def get_stdin_file(self):
        return self._get_stdin_file()

    def set_stdin_file(self, filename):
        self._set_stdin_file(filename)

    def get_stdout_file(self):
        return self._get_stdout_file()

    def set_stdout_file(self, filename):
        self._set_stdout_file(filename)

    def get_stderr_file(self):
        return self._get_stderr_file()

    def set_stderr_file(self, filename):
        self._set_stderr_file(filename)

    def get_target_file(self):
        return self._get_target_file()

    def set_target_file(self, filename):
        self._set_target_file(filename)

    def get_executable(self):
        return self._get_executable()

    def set_executable(self, executable):
        self._set_executable(executable)

    def get_max_running(self):
        return self._get_max_running()

    def set_max_running(self, max_running):
        self._set_max_running(max_running)

    def get_error_file(self):
        return self._get_error_file()

    def get_start_file(self):
        return self._get_start_file()

    def get_max_running_minutes(self):
        return self._get_max_running_minutes()

    def set_max_running_minutes(self, min_value):
        self._set_max_running_minutes(min_value)

    def get_environment(self):
        return self._get_environment()

    def set_environment(self, key, value):
        self._set_environment(key, value)

    def get_license_path(self):
        return self._get_license_path()

    def get_arglist(self):
        return self._get_arglist()

    def clear_environment(self):
        self._clear_environment()

    def save(self):
        self._save()

    def free(self):
        self._free()

    def name(self):
        return self._get_name()
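
A hedged sketch of loading and inspecting an ExtJob from a job configuration file. The config file path and the environment variable are assumptions.

# Sketch only: config file path and environment variable are assumed.
job = ExtJob("/path/to/jobs/RUN_SIMULATOR", private=False)

print(job)                                  # ExtJob(RUN_SIMULATOR, config_file = ...)
print(job.get_executable())
job.set_max_running(2)
job.set_environment("OMP_NUM_THREADS", "1")
job.save()                                  # presumably persists the changes back to the config file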
Example #9
class Driver(BaseCClass):
    TYPE_NAME = "driver"
    _alloc = QueuePrototype("void* queue_driver_alloc( queue_driver_enum )",
                            bind=False)
    _free = QueuePrototype("void queue_driver_free( driver )")
    _set_option = QueuePrototype(
        "void queue_driver_set_option( driver , char* , char*)")
    _submit = QueuePrototype(
        "void* queue_driver_submit_job( driver , char* , int , char* , char* , int , char**)"
    )
    _free_job = QueuePrototype("void   queue_driver_free_job( driver , job )")
    _get_status = QueuePrototype("int queue_driver_get_status( driver , job)")
    _kill_job = QueuePrototype("void queue_driver_kill_job( driver , job )")
    _get_max_running = QueuePrototype(
        "int queue_driver_get_max_running( driver )")
    _set_max_running = QueuePrototype(
        "void queue_driver_set_max_running( driver , int)")
    _get_name = QueuePrototype("char* queue_driver_get_name( driver )")

    def __init__(self, driver_type, max_running=1, options=None):
        """
        Creates a new driver instance
        """
        c_ptr = self._alloc(driver_type)
        super(Driver, self).__init__(c_ptr)
        if options:
            for (key, value) in options:
                self.set_option(key, value)
        self.set_max_running(max_running)

    def set_option(self, option, value):
        """
        Set the driver option @option to @value.

        If the option is successfully set the method will return True;
        if the @option is not recognized the method will return False.
        The supplied value should be a string.
        """
        return self._set_option(option, str(value))

    def is_driver_instance(self):
        return True

    def submit(self, name, cmd, run_path, argList, num_cpu=1, blocking=False):
        argc = len(argList)
        argv = (ctypes.c_char_p * argc)()
        argv[:] = map(str, argList)

        c_ptr = self._submit(cmd, num_cpu, run_path, name, argc, argv)
        job = Job(c_ptr, self)
        if blocking:
            job.block()
            job = None
        return job

    def free_job(self, job):
        self._free_job(job)

    def get_status(self, job):
        status = self._get_status(job)
        return status

    def kill_job(self, job):
        self._kill_job(job)

    def get_max_running(self):
        return self._get_max_running()

    def set_max_running(self, max_running):
        self._set_max_running(max_running)

    max_running = property(get_max_running, set_max_running)

    @property
    def name(self):
        return self._get_name()

    def free(self):
        self._free()
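
A hedged sketch of creating a driver with options and submitting a job directly. The import path, the enum name, the option name and the paths are assumptions.

# Sketch only: import path, enum name, option name and paths are assumed.
from ert.job_queue import Driver, QueueDriverEnum

driver = Driver(QueueDriverEnum.LSF_DRIVER,
                max_running=10,
                options=[("LSF_QUEUE", "normal")])

job = driver.submit("sleep_job", "/bin/sleep", "/tmp/run_1", ["10"])
print(driver.get_status(job))
driver.kill_job(job)
driver.free_job(job)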
Example #10
class ExtJoblist(BaseCClass):
    TYPE_NAME = "ext_joblist"
    _alloc = QueuePrototype("void* ext_joblist_alloc( )", bind=False)
    _free = QueuePrototype("void ext_joblist_free( ext_joblist )")
    _alloc_list = QueuePrototype(
        "stringlist_ref ext_joblist_alloc_list(ext_joblist)")
    _get_job = QueuePrototype(
        "ext_job_ref ext_joblist_get_job(ext_joblist, char*)")
    _del_job = QueuePrototype("int ext_joblist_del_job(ext_joblist, char*)")
    _has_job = QueuePrototype("int ext_joblist_has_job(ext_joblist, char*)")
    _add_job = QueuePrototype(
        "void ext_joblist_add_job(ext_joblist, char*, ext_job)")
    _get_jobs = QueuePrototype("hash_ref ext_joblist_get_jobs(ext_joblist)")
    _size = QueuePrototype("int ext_joblist_get_size(ext_joblist)")

    def __init__(self):
        c_ptr = self._alloc()
        super(ExtJoblist, self).__init__(c_ptr)

    def get_jobs(self):
        """ @rtype: Hash """
        jobs = self._get_jobs()
        jobs.setParent(self)
        return jobs

    def __len__(self):
        return self._size()

    def __contains__(self, job):
        return self._has_job(job)

    def __iter__(self):
        names = self.getAvailableJobNames()
        for job in names:
            yield self[job]

    def __getitem__(self, job):
        if job in self:
            return self._get_job(job).setParent(self)

    def getAvailableJobNames(self):
        """ @rtype: StringList """
        return self._alloc_list().setParent(self)

    def del_job(self, job):
        return self._del_job(job)

    def has_job(self, job):
        return job in self

    def get_job(self, job):
        """ @rtype: ExtJob """
        return self[job]

    def add_job(self, job_name, new_job):
        if not new_job.isReference():
            new_job.convertToCReference(self)

        self._add_job(job_name, new_job)

    def free(self):
        self._free()
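
A hedged sketch of populating and querying the constructible ExtJoblist. The job name and config file path are assumptions; ExtJob is the class from Example #8.

# Sketch only: job name and config file path are assumed.
joblist = ExtJoblist()
joblist.add_job("ECHO", ExtJob("/path/to/ECHO", private=False))

print(len(joblist))            # 1
print("ECHO" in joblist)       # True
for job in joblist:            # yields ExtJob instances
    print(job.name())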
Example #11
class WorkflowJoblist(BaseCClass):
    TYPE_NAME = "workflow_joblist"
    _alloc = QueuePrototype("void*            workflow_joblist_alloc()",
                            bind=False)
    _free = QueuePrototype(
        "void             workflow_joblist_free(workflow_joblist)")
    _add_job = QueuePrototype(
        "void             workflow_joblist_add_job(workflow_joblist, workflow_job)"
    )
    _add_job_from_file = QueuePrototype(
        "bool             workflow_joblist_add_job_from_file(workflow_joblist, char*, char*)"
    )
    _has_job = QueuePrototype(
        "bool             workflow_joblist_has_job(workflow_joblist, char*)")
    _get_job = QueuePrototype(
        "workflow_job_ref workflow_joblist_get_job(workflow_joblist, char*)")
    _count = QueuePrototype(
        "workflow_job_ref workflow_joblist_get_job(workflow_joblist, char*)")

    def __init__(self):
        c_ptr = self._alloc()
        super(WorkflowJoblist, self).__init__(c_ptr)

    def addJob(self, job):
        """ @type job: WorkflowJob """
        job.convertToCReference(self)
        self._add_job(job)

    def addJobFromFile(self, name, filepath):
        """
         @type name: str
         @type filepath: str
         @rtype: bool
        """
        if not os.path.exists(filepath):
            raise UserWarning("Job file '%s' does not exist!" % filepath)

        return self._add_job_from_file(name, filepath)

    def __contains__(self, item):
        """
         @type item: str or WorkflowJob
         @rtype: bool
        """

        if isinstance(item, WorkflowJob):
            item = item.name()

        return self._has_job(item)

    def __getitem__(self, item):
        """
         @type item: str
         @rtype: WorkflowJob
        """

        if item not in self:
            return None

        return self._get_job(item)

    def free(self):
        self._free()
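
A hedged sketch of filling a WorkflowJoblist from a job file and looking the job up again. The job name and file path are assumptions.

# Sketch only: job name and job file path are assumed.
joblist = WorkflowJoblist()
if joblist.addJobFromFile("EXPORT_FIELD", "/path/to/EXPORT_FIELD"):
    job = joblist["EXPORT_FIELD"]           # WorkflowJob, or None for unknown names
    print(job.name(), "EXPORT_FIELD" in joblist)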