Example #1
File: executor.py  Project: PureQsh/toil
def shutdown(self, driver):
    log.critical('Shutting down executor ...')
    # Kill every task that is still running; iterate over a snapshot of the IDs
    # in case killTask removes entries from runningTasks while we loop.
    for taskId in list(self.runningTasks.keys()):
        self.killTask(driver, taskId)
    # Clean up downloaded resources and the batch-system work directory.
    Resource.cleanSystem()
    BatchSystemSupport.workerCleanup(self.workerCleanupInfo)
    log.critical('... executor shut down.')
Example #2
    def shutdown(self) -> None:
        """Terminate cleanly and join daddy thread."""
        if self.daddyThread is not None:
            # Tell the daddy thread to stop.
            self.shuttingDown.set()
            # Wait for it to stop.
            self.daddyThread.join()

        BatchSystemSupport.workerCleanup(self.workerCleanupInfo)
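
This method only terminates cleanly because the background ("daddy") thread watches the same shuttingDown event and returns once it is set, which is what lets join() complete. Below is a minimal sketch of that pairing; DaddyExample, _daddy, and the one-second poll interval are illustrative assumptions, not the project's actual loop.

import threading

class DaddyExample:
    """Minimal sketch only; names and the poll interval are illustrative."""

    def __init__(self):
        self.shuttingDown = threading.Event()
        self.daddyThread = threading.Thread(target=self._daddy, daemon=True)
        self.daddyThread.start()

    def _daddy(self):
        # Returns (so join() can unblock) as soon as shutdown() sets the event.
        while not self.shuttingDown.wait(timeout=1.0):
            pass  # periodic bookkeeping would go here

    def shutdown(self) -> None:
        self.shuttingDown.set()
        self.daddyThread.join()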
Example #3
def shutdown(self):
    """
    Cleanly terminate worker threads. Add one sentinel to inputQueue per worker
    thread, then join all worker threads.
    """
    # Remove our reference to inputQueue; any use of it after this call raises
    # an exception instead of silently queueing work that will never run.
    inputQueue = self.inputQueue
    self.inputQueue = None
    # One None sentinel per worker; each worker exits after consuming one.
    for i in range(self.numWorkers):
        inputQueue.put(None)
    for thread in self.workerThreads:
        thread.join()
    BatchSystemSupport.workerCleanup(self.workerCleanupInfo)
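
The one-sentinel-per-worker scheme works because each worker thread consumes exactly one None from the queue and then exits, so putting numWorkers sentinels guarantees every thread unblocks before join(). Below is a minimal sketch of the consumer side, with a hypothetical worker loop standing in for the project's real worker function.

import queue
import threading

numWorkers = 4
inputQueue = queue.Queue()

def worker():
    # Block on the queue; exit on the None sentinel so join() can succeed.
    while True:
        item = inputQueue.get()
        if item is None:
            break
        print('processing', item)  # stand-in for real work

workerThreads = [threading.Thread(target=worker) for _ in range(numWorkers)]
for t in workerThreads:
    t.start()

# Shutdown mirrors the method above: one sentinel per worker, then join.
for _ in range(numWorkers):
    inputQueue.put(None)
for t in workerThreads:
    t.join()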
Example #4
def executor():
    """
    Main function of the _toil_kubernetes_executor entrypoint.

    Runs inside the Toil container.

    Responsible for setting up the user script and running the command for the
    job (which may in turn invoke the Toil worker entrypoint).

    """

    logging.basicConfig(level=logging.DEBUG)
    logger.debug("Starting executor")
    
    # If we don't manage to run the child, what should our exit code be?
    exit_code = EXIT_STATUS_UNAVAILABLE_VALUE

    if len(sys.argv) != 2:
        logger.error('Executor requires exactly one base64-encoded argument')
        sys.exit(exit_code)

    # Take in a base64-encoded pickled dict as our first argument and decode it
    try:
        # Make sure to encode the text arguments to bytes before base 64 decoding
        job = pickle.loads(base64.b64decode(sys.argv[1].encode('utf-8')))
    except Exception:
        exc_info = sys.exc_info()
        logger.error('Exception while unpickling task: ', exc_info=exc_info)
        sys.exit(exit_code)

    if 'environment' in job:
        # Adopt the job environment into the executor.
        # This lets us use things like TOIL_WORKDIR when figuring out how to talk to other executors.
        logger.debug('Adopting environment: %s', str(job['environment'].keys()))
        for var, value in job['environment'].items():
            os.environ[var] = value
    
    # Set JTRES_ROOT and other global state needed for resource
    # downloading/deployment to work.
    # TODO: Every worker downloads resources independently.
    # We should have a way to share a resource directory.
    logger.debug('Preparing system for resource download')
    Resource.prepareSystem()
    try:
        if 'userScript' in job:
            job['userScript'].register()
            
        # We need to tell other workers in this workflow not to do cleanup now that
        # we are here, or else wait for them to finish. So get the cleanup info
        # that knows where the work dir is.
        cleanupInfo = job['workerCleanupInfo']
        
        # Join a Last Process Standing arena, so we know which process should be
        # responsible for cleanup.
        # We need to use the real workDir, not just the override from cleanupInfo.
        # This needs to happen after the environment is applied.
        arena = LastProcessStandingArena(Toil.getToilWorkDir(cleanupInfo.workDir), 
            cleanupInfo.workflowID + '-kube-executor')
        arena.enter()
        try:
            
            # Start the child process
            logger.debug("Invoking command: '%s'", job['command'])
            child = subprocess.Popen(job['command'],
                                     preexec_fn=lambda: os.setpgrp(),
                                     shell=True)

            # Reproduce child's exit code
            exit_code = child.wait()
            
        finally:
            for _ in arena.leave():
                # We are the last concurrent executor to finish.
                # Do batch system cleanup.
                logger.debug('Cleaning up worker')
                BatchSystemSupport.workerCleanup(cleanupInfo)
    finally:
        logger.debug('Cleaning up resources')
        # TODO: Change resource system to use a shared resource directory for everyone.
        # Then move this into the last-process-standing cleanup
        Resource.cleanSystem()
        logger.debug('Shutting down')
        sys.exit(exit_code)
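
For reference, the single base64-encoded argument that this entrypoint decodes is produced by the inverse of the pickle/base64 step above. Below is a minimal sketch of the submitting side, using a toy job dict; a real payload also needs at least the 'workerCleanupInfo' key that the function reads (and optionally 'userScript').

import base64
import pickle

job = {
    'command': 'echo hello from the executor',
    'environment': {'EXAMPLE_VAR': 'value'},
    # A real job dict also carries 'workerCleanupInfo' (and optionally 'userScript').
}
# Inverse of: pickle.loads(base64.b64decode(sys.argv[1].encode('utf-8')))
encoded = base64.b64encode(pickle.dumps(job)).decode('utf-8')
print(encoded)  # would be passed as the single argument to _toil_kubernetes_executor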