Example #1
0
def main():
    """Run a single job from an existing Toil job store locally, for debugging.

    Parses the job store locator and a job ID from the command line, optionally
    prints stored information about the job, then executes it in this process
    with worker output going to the console.
    """
    parser = parser_with_common_options(jobstore_option=True)
    parser.add_argument(
        "jobID",
        nargs=1,
        help=
        "The job store id of a job within the provided jobstore to run by itself."
    )
    parser.add_argument(
        "--printJobInfo",
        nargs=1,
        help=
        "Return information about this job to the user including preceding jobs, "
        "inputs, outputs, and runtime from the last known run.")

    options = parser.parse_args()
    set_logging_from_options(options)
    config = Config()
    config.setOptions(options)

    # Resume (rather than create) the job store so existing workflow state loads.
    jobStore = Toil.resumeJobStore(config.jobStore)

    if options.printJobInfo:
        # nargs=1 stores a one-element list; unwrap it so the job name
        # (a string) is passed, matching how options.jobID is handled below.
        printContentsOfJobStore(jobStorePath=config.jobStore,
                                nameOfJob=options.printJobInfo[0])

    # TODO: Option to print list of successor jobs
    # TODO: Option to run job within python debugger, allowing step through of arguments
    # idea would be to have option to import pdb and set breakpoint at the start of the user's code

    jobID = options.jobID[0]
    logger.debug(f"Running the following job locally: {jobID}")
    # redirectOutputToLogFile=False keeps worker output on the console for debugging.
    workerScript(jobStore, config, jobID, jobID, redirectOutputToLogFile=False)
    logger.debug(f"Finished running: {jobID}")
Example #2
0
    def _runDebugJob(self, jobCommand, jobID, environment):
        """
        Run the jobCommand right now, in the current thread.
        May only be called in debug-worker mode.
        Assumes resources are available.

        :param jobCommand: command line to run; either a `_toil_worker`
            invocation (run in-process) or an arbitrary shell command
        :param jobID: batch-system identifier used as the runningJobs key
        :param environment: extra environment variables merged over os.environ
            for shell commands
        """

        assert self.debugWorker

        # TODO: It is not possible to kill running jobs in forkless mode,
        # because they are run immediately in the main thread.
        info = Info(time.time(), None, None, killIntended=False)
        try:
            self.runningJobs[jobID] = info
            try:
                if jobCommand.startswith("_toil_worker "):
                    # We can actually run in this thread
                    jobName, jobStoreLocator, jobStoreID = jobCommand.split()[
                        1:]  # Parse command
                    jobStore = Toil.resumeJobStore(jobStoreLocator)
                    toil_worker.workerScript(
                        jobStore,
                        jobStore.config,
                        jobName,
                        jobStoreID,
                        redirectOutputToLogFile=not self.debugWorker
                    )  # Call the worker
                else:
                    # Run synchronously. If starting or running the command fails, let the exception stop us.
                    subprocess.check_call(jobCommand,
                                          shell=True,
                                          env=dict(os.environ, **environment))
            finally:
                # Drop the bookkeeping entry even if the job raised, so a
                # failed job cannot leak a stale runningJobs record. Mirrors
                # the cleanup structure used by _runWorker.
                self.runningJobs.pop(jobID)
        finally:
            if not info.killIntended:
                # Forkless jobs always report exit status 0 here; failures
                # surface as exceptions instead.
                self.outputQueue.put((jobID, 0, time.time() - info.time))
Example #3
0
 def _runWorker(self, jobCommand, jobID, environment):
     """
     Run the jobCommand using the worker and wait for it to finish.
     The worker is forked unless it is a '_toil_worker' job and
     debugWorker is True.

     :param jobCommand: command line to run; a `_toil_worker` invocation may
         run in-process (debug mode), anything else runs via the shell
     :param jobID: batch-system identifier used as the runningJobs key
     :param environment: extra environment variables merged over os.environ
         for forked (shell) jobs
     """
     startTime = time.time()  # Time job is started
     if self.debugWorker and "_toil_worker" in jobCommand:
         # Run the worker without forking
         jobName, jobStoreLocator, jobStoreID = jobCommand.split()[
             1:]  # Parse command
         jobStore = Toil.resumeJobStore(jobStoreLocator)
         # TODO: The following does not yet properly populate self.runningJobs so it is not possible to kill
         # running jobs in forkless mode - see the "None" value in place of popen
         info = Info(time.time(), None, killIntended=False)
         try:
             self.runningJobs[jobID] = info
             try:
                 toil_worker.workerScript(
                     jobStore,
                     jobStore.config,
                     jobName,
                     jobStoreID,
                     redirectOutputToLogFile=not self.debugWorker
                 )  # Call the worker
             finally:
                 # Always remove the bookkeeping entry, even if the worker raised.
                 self.runningJobs.pop(jobID)
         finally:
             # Forkless jobs always report exit status 0 here; a failure
             # surfaces as an exception from workerScript instead.
             if not info.killIntended:
                 self.outputQueue.put((jobID, 0, time.time() - startTime))
     else:
         # Fork a subprocess for the job; popenLock serializes Popen calls.
         with self.popenLock:
             popen = subprocess.Popen(jobCommand,
                                      shell=True,
                                      env=dict(os.environ, **environment))
         info = Info(time.time(), popen, killIntended=False)
         try:
             self.runningJobs[jobID] = info
             try:
                 statusCode = popen.wait()
                 if statusCode != 0 and not info.killIntended:
                     log.error(
                         "Got exit code %i (indicating failure) "
                         "from job %s.", statusCode, self.jobs[jobID])
             finally:
                 self.runningJobs.pop(jobID)
         finally:
             # NOTE(review): if popen.wait() raised above, statusCode is
             # unbound here and this put would raise NameError — presumably
             # wait() is not expected to fail; confirm.
             if not info.killIntended:
                 self.outputQueue.put(
                     (jobID, statusCode, time.time() - startTime))
Example #4
0
def main():
    """Run a single job from an existing Toil job store locally, for debugging.

    Parses the job store locator and a job ID from the command line, optionally
    prints stored information about the job, then executes it in this process
    with worker output going to the console.
    """
    parser = getBasicOptionParser()

    parser.add_argument(
        "jobStore",
        type=str,
        help="The location of the job store used by the workflow." +
        jobStoreLocatorHelp)
    parser.add_argument("jobID",
                        nargs=1,
                        help="The job store id of a job "
                        "within the provided jobstore to run by itself.")
    parser.add_argument(
        "--printJobInfo",
        nargs=1,
        help="Return information about this job to the user"
        " including preceding jobs, inputs, outputs, and runtime"
        " from the last known run.")
    parser.add_argument("--version", action='version', version=version)

    # Parse options
    options = parseBasicOptions(parser)
    config = Config()
    config.setOptions(options)

    # Load the job store (resume existing state rather than create a new one)
    jobStore = Toil.resumeJobStore(config.jobStore)

    if options.printJobInfo:
        # nargs=1 stores a one-element list; unwrap it so the job name
        # (a string) is passed, matching how options.jobID is handled below.
        printContentsOfJobStore(jobStorePath=options.jobStore,
                                nameOfJob=options.printJobInfo[0])

    # TODO: Option to print list of successor jobs
    # TODO: Option to run job within python debugger, allowing step through of arguments
    # idea would be to have option to import pdb and set breakpoint at the start of the user's code

    # Run the job locally; keep worker output on the console for debugging
    jobID = options.jobID[0]
    logger.debug("Going to run the following job locally: %s", jobID)
    workerScript(jobStore, config, jobID, jobID, redirectOutputToLogFile=False)
    logger.debug("Ran the following job locally: %s", jobID)