def findMesosBinary(self, names):
    """
    Locate the first of the given Mesos binaries on the PATH.

    :param names: a single binary name or a list of candidate names; the
        first one found wins.
    :return: the absolute path to the located binary.
    :raises RuntimeError: if none of the candidates can be found.
    """
    # NOTE(review): was `isinstance(names, basestring)`, which raises
    # NameError on Python 3 (basestring no longer exists); `str` preserves
    # the Python 3 behavior of accepting a single name.
    if isinstance(names, str):
        # Handle a single string
        names = [names]
    for name in names:
        # `which` here raises StopIteration (rather than returning None)
        # when the binary is absent — presumably a project-local helper,
        # not shutil.which; verify against the file's imports.
        try:
            return which(name)
        except StopIteration:
            try:
                # Special case for users of PyCharm on OS X. This is where
                # Homebrew installs it. It's hard to set PATH for PyCharm
                # (or any GUI app) on OS X so let's make it easy for those
                # poor souls.
                return which(name, path='/usr/local/sbin')
            except StopIteration:
                pass
    # If we get here, nothing we can use is present. We need to complain.
    if len(names) == 1:
        sought = "binary '%s'" % names[0]
    else:
        sought = 'any binary in %s' % str(names)
    # "it's" -> "its": fix the grammatical error in the user-facing message.
    raise RuntimeError(
        "Cannot find %s. Make sure Mesos is installed "
        "and its 'bin' directory is present on the PATH." % sought)
def needs_mesos(test_item):
    """Use as a decorator before test classes or methods to run only if Mesos is installed."""
    test_item = _mark_test('mesos', test_item)
    skip_reason = "Install Mesos (and Toil with the 'mesos' extra) to include this test."
    # Both the Mesos binaries and the Python bindings must be available.
    binaries_present = which('mesos-master') or which('mesos-slave')
    if not binaries_present:
        return unittest.skip(skip_reason)(test_item)
    try:
        import pymesos
        import psutil
    except ImportError:
        return unittest.skip(skip_reason)(test_item)
    return test_item
def needs_appliance(test_item):
    """
    Use as a decorator before test classes or methods to only run them if the
    Toil appliance Docker image is locally available.
    """
    import json
    test_item = _mark_test('appliance', test_item)
    if less_strict_bool(os.getenv('TOIL_SKIP_DOCKER')):
        return unittest.skip('Skipping docker test.')(test_item)
    if not which('docker'):
        return unittest.skip('Install Docker to include this test.')(test_item)
    image = applianceSelf()
    try:
        stdout = subprocess.check_output(['docker', 'inspect', image]).decode('utf-8')
    except subprocess.CalledProcessError:
        # `docker inspect` exits non-zero when the image is absent.
        images = set()
    else:
        # RepoTags is None (not a list) for dangling/untagged images; guard
        # with `or []` so the membership test cannot raise TypeError.
        images = {i['Id'] for i in json.loads(stdout)
                  if image in (i['RepoTags'] or [])}
    if len(images) == 0:
        return unittest.skip(
            "Cannot find appliance image %s. Use 'make test' target to "
            "automatically build appliance, or just run 'make docker' "
            "prior to running this test." % image)(test_item)
    elif len(images) == 1:
        return test_item
    else:
        assert False, 'Expected `docker inspect` to return zero or one image.'
def needs_slurm(test_item):
    """
    Use as a decorator before test classes or methods to only run them if Slurm is installed.
    """
    test_item = _mark_test('slurm', test_item)
    # Presence of the squeue command is taken as evidence of a Slurm install.
    if not which('squeue'):
        return unittest.skip("Install Slurm to include this test.")(test_item)
    return test_item
def needs_lsf(test_item):
    """
    Use as a decorator before test classes or methods to only run them if LSF is installed.
    """
    test_item = _mark_test('lsf', test_item)
    # Presence of the bsub command is taken as evidence of an LSF install.
    if not which('bsub'):
        return unittest.skip("Install LSF to include this test.")(test_item)
    return test_item
def needs_parasol(test_item):
    """
    Use as decorator so tests are only run if Parasol is installed.
    """
    test_item = _mark_test('parasol', test_item)
    # Presence of the parasol command is taken as evidence of an install.
    if not which('parasol'):
        return unittest.skip("Install Parasol to include this test.")(test_item)
    return test_item
def needs_torque(test_item):
    """
    Use as a decorator before test classes or methods to only run them if PBS/Torque is installed.
    """
    test_item = _mark_test('torque', test_item)
    # Presence of pbsnodes is taken as evidence of a PBS/Torque install.
    if not which('pbsnodes'):
        return unittest.skip("Install PBS/Torque to include this test.")(test_item)
    return test_item
def needs_gridengine(test_item):
    """
    Use as a decorator before test classes or methods to only run them if GridEngine is installed.
    """
    test_item = _mark_test('gridengine', test_item)
    # Presence of qhost is taken as evidence of a GridEngine install.
    if not which('qhost'):
        return unittest.skip("Install GridEngine to include this test.")(test_item)
    return test_item
def needs_docker(test_item):
    """
    Use as a decorator before test classes or methods to only run them if
    docker is installed and docker-based tests are enabled.
    """
    test_item = _mark_test('docker', test_item)
    # An explicit opt-out via the environment wins over docker being present.
    if less_strict_bool(os.getenv('TOIL_SKIP_DOCKER')):
        return unittest.skip('Skipping docker test.')(test_item)
    if not which('docker'):
        return unittest.skip("Install docker to include this test.")(test_item)
    return test_item
def needs_mesos(test_item):
    """
    Use as a decorator before test classes or methods to only run them if the Mesos is installed
    and configured.
    """
    # NOTE(review): this redefines needs_mesos; an earlier definition exists
    # in this file and this one shadows it — confirm which copy is intended.
    test_item = _mark_test('mesos', test_item)
    if not (which('mesos-master') or which('mesos-slave')):
        return unittest.skip(
            "Install Mesos (and Toil with the 'mesos' extra) to include this test."
        )(test_item)
    try:
        # noinspection PyUnresolvedReferences
        import pymesos
        import psutil
    except ImportError:
        return unittest.skip(
            "Install Mesos (and Toil with the 'mesos' extra) to include this test."
        )(test_item)
    # The dead `except: raise` clause was removed: a bare except that
    # immediately re-raises is a no-op, and the try/else flattened to a
    # plain return with identical behavior.
    return test_item
def __init__(self, config, maxCores, maxMemory, maxDisk):
    """
    Set up the Parasol batch system: locate the parasol binary, create the
    results directory inside the (file-based) job store, and start the
    background worker thread that watches for finished jobs.

    :param config: Toil configuration; parasolCommand, parasolMaxBatches and
        jobStore are read from it.
    :param maxCores: forwarded to the base batch system.
    :param maxMemory: forwarded to the base batch system; not actually
        enforced by Parasol (a warning is logged if it is set).
    :param maxDisk: forwarded to the base batch system.
    :raises RuntimeError: if the parasol command cannot be found on the PATH
        or the job store is not a file job store.
    """
    super(ParasolBatchSystem, self).__init__(config, maxCores, maxMemory, maxDisk)
    if maxMemory != sys.maxsize:
        # logger.warn is a deprecated alias of logger.warning since Python 3.3.
        logger.warning('The Parasol batch system does not support maxMemory.')
    # Keep the name of the results file for the pstat2 command..
    command = config.parasolCommand
    if os.path.sep not in command:
        # A bare command name: resolve it on the PATH. `which` raises
        # StopIteration when absent (project-local helper, not shutil.which).
        try:
            command = which(command)
        except StopIteration:
            raise RuntimeError("Can't find %s on PATH." % command)
    logger.debug('Using Parasol at %s', command)
    self.parasolCommand = command
    jobStoreType, path = Toil.parseLocator(config.jobStore)
    if jobStoreType != 'file':
        raise RuntimeError(
            "The parasol batch system doesn't currently work with any "
            "jobStore type except file jobStores.")
    self.parasolResultsDir = tempfile.mkdtemp(dir=os.path.abspath(path))
    logger.debug("Using parasol results dir: %s", self.parasolResultsDir)
    # In Parasol, each results file corresponds to a separate batch, and all jobs in a batch
    # have the same cpu and memory requirements. The keys to this dictionary are the (cpu,
    # memory) tuples for each batch. A new batch is created whenever a job has a new unique
    # combination of cpu and memory requirements.
    self.resultsFiles = dict()
    self.maxBatches = config.parasolMaxBatches
    # Allows the worker process to send back the IDs of jobs that have finished, so the batch
    # system can decrease its used cpus counter
    self.cpuUsageQueue = Queue()
    # Also stores finished job IDs, but is read by getUpdatedJobIDs().
    self.updatedJobsQueue = Queue()
    # Use this to stop the worker when shutting down
    self.running = True
    self.worker = Thread(target=self.updatedJobWorker, args=())
    self.worker.start()
    self.usedCpus = 0
    self.jobIDsToCpu = {}
    # Set of jobs that have been issued but aren't known to have finished or been killed yet.
    # Jobs that end by themselves are removed in getUpdatedJob, and jobs that are killed are
    # removed in killBatchJobs.
    self.runningJobs = set()