def main():
    """Kill the running jobs of a toil workflow.

    Resolves the job store named on the command line, re-creates the batch
    system from the stored config (which is expected to kill existing jobs),
    and then explicitly issues a kill for every job ID still listed as issued.
    """
    parser = getBasicOptionParser()
    parser.add_argument(
        "jobStore",
        type=str,
        help=(
            "Store in which to place job management files "
            "and the global accessed temporary files "
            "(If this is a file path this needs to be globally accessible "
            "by all machines running jobs).\n"
            "If the store already exists and restart is false an"
            " ExistingJobStoreException exception will be thrown."
        ),
    )
    parser.add_argument("--version", action="version", version=version)
    options = parseBasicOptions(parser)
    jobStore = Toil.loadOrCreateJobStore(options.jobStore)
    logger.info("Starting routine to kill running jobs in the toil workflow: %s" % options.jobStore)
    ####This behaviour is now broken
    batchSystem = Toil.createBatchSystem(
        jobStore.config
    )  # This should automatically kill the existing jobs.. so we're good.
    for jobID in batchSystem.getIssuedBatchJobIDs():  # Just in case we do it again.
        # killBatchJobs takes a list of job IDs, not a single ID.
        batchSystem.killBatchJobs([jobID])
    logger.info("All jobs SHOULD have been killed")
def main():
    """Kill the running jobs of a toil workflow.

    Resolves the job store named on the command line, re-creates the batch
    system from the stored config (which is expected to kill existing jobs),
    and then explicitly issues a kill for every job ID still listed as issued.
    """
    parser = getBasicOptionParser()
    parser.add_argument(
        "jobStore",
        type=str,
        help=(
            "Store in which to place job management files "
            "and the global accessed temporary files "
            "(If this is a file path this needs to be globally accessible "
            "by all machines running jobs).\n"
            "If the store already exists and restart is false an"
            " JobStoreCreationException exception will be thrown."
        ),
    )
    parser.add_argument("--version", action='version', version=version)
    options = parseBasicOptions(parser)
    jobStore = Toil.loadOrCreateJobStore(options.jobStore)
    logger.info(
        "Starting routine to kill running jobs in the toil workflow: %s" % options.jobStore)
    ####This behaviour is now broken
    batchSystem = Toil.createBatchSystem(
        jobStore.config
    )  # This should automatically kill the existing jobs.. so we're good.
    for jobID in batchSystem.getIssuedBatchJobIDs():  # Just in case we do it again.
        # killBatchJobs takes a list of job IDs, not a single ID.
        batchSystem.killBatchJobs([jobID])
    logger.info("All jobs SHOULD have been killed")
def main():
    """Kill the workflow associated with a job store.

    For remote (aws/google) job stores, re-creates the batch system from the
    stored config and kills every issued job. For file job stores, kills the
    leader process whose PID was recorded in ``pid.log`` inside the job store
    directory.

    Raises:
        OSError: if the recorded leader PID could not be signalled.
    """
    parser = parser_with_common_options()
    options = parser.parse_args()
    set_logging_from_options(options)
    config = Config()
    config.setOptions(options)
    # Strip a leading 'file:' scheme so the remainder is a plain path.
    config.jobStore = config.jobStore[5:] if config.jobStore.startswith('file:') else config.jobStore

    # ':' means an aws/google jobstore; use the old (broken?) method
    if ':' in config.jobStore:
        jobStore = Toil.resumeJobStore(config.jobStore)
        logger.info("Starting routine to kill running jobs in the toil workflow: %s", config.jobStore)
        # TODO: This behaviour is now broken: https://github.com/DataBiosphere/toil/commit/a3d65fc8925712221e4cda116d1825d4a1e963a1
        batchSystem = Toil.createBatchSystem(jobStore.config)  # Should automatically kill existing jobs, so we're good.
        for jobID in batchSystem.getIssuedBatchJobIDs():  # Just in case we do it again.
            # killBatchJobs takes a list of job IDs, not a single ID.
            batchSystem.killBatchJobs([jobID])
        logger.info("All jobs SHOULD have been killed")
    # otherwise, kill the pid recorded in the jobstore
    else:
        pid_log = os.path.join(os.path.abspath(config.jobStore), 'pid.log')
        with open(pid_log, 'r') as f:
            pid2kill = f.read().strip()
        try:
            os.kill(int(pid2kill), signal.SIGKILL)
            logger.info("Toil process %s successfully terminated." % str(pid2kill))
        except OSError:
            logger.error("Toil process %s could not be terminated." % str(pid2kill))
            raise
def main():
    """Kill the running jobs of a toil workflow.

    Resumes the job store named on the command line, re-creates the batch
    system from the stored config (which is expected to kill existing jobs),
    and then explicitly issues a kill for every job ID still listed as issued.
    """
    parser = getBasicOptionParser()
    parser.add_argument(
        "jobStore",
        type=str,
        help="The location of the job store used by the workflow whose jobs should "
             "be killed." + jobStoreLocatorHelp)
    parser.add_argument("--version", action='version', version=version)
    options = parseBasicOptions(parser)
    jobStore = Toil.resumeJobStore(options.jobStore)
    logger.info(
        "Starting routine to kill running jobs in the toil workflow: %s" % options.jobStore)
    ####This behaviour is now broken
    batchSystem = Toil.createBatchSystem(
        jobStore.config
    )  # This should automatically kill the existing jobs.. so we're good.
    for jobID in batchSystem.getIssuedBatchJobIDs():  # Just in case we do it again.
        # killBatchJobs takes a list of job IDs, not a single ID.
        batchSystem.killBatchJobs([jobID])
    logger.info("All jobs SHOULD have been killed")
def main():
    """Kill the running jobs of a toil workflow.

    Resumes the job store named on the command line, re-creates the batch
    system from the stored config (which is expected to kill existing jobs),
    and then explicitly issues a kill for every job ID still listed as issued.
    """
    parser = getBasicOptionParser()
    parser.add_argument("jobStore", type=str,
                        help="The location of the job store used by the workflow whose jobs should "
                             "be killed." + jobStoreLocatorHelp)
    parser.add_argument("--version", action='version', version=version)
    options = parseBasicOptions(parser)
    jobStore = Toil.resumeJobStore(options.jobStore)
    logger.info("Starting routine to kill running jobs in the toil workflow: %s" % options.jobStore)
    ####This behaviour is now broken
    batchSystem = Toil.createBatchSystem(jobStore.config)  # This should automatically kill the existing jobs.. so we're good.
    for jobID in batchSystem.getIssuedBatchJobIDs():  # Just in case we do it again.
        # killBatchJobs takes a list of job IDs, not a single ID.
        batchSystem.killBatchJobs([jobID])
    logger.info("All jobs SHOULD have been killed")
def main() -> None:
    """Terminate the workflow attached to the job store given on the command line.

    File job stores are handled by signalling the leader PID recorded in the
    store's shared-files directory; any other (remote) store falls back to
    re-creating the batch system and killing each issued job.
    """
    parser = parser_with_common_options()
    options = parser.parse_args()
    set_logging_from_options(options)
    config = Config()
    config.setOptions(options)

    store_type, _ = Toil.parseLocator(config.jobStore)
    if store_type == 'file':
        # Kill the pid recorded in the jobstore.
        # TODO: We assume this is a local PID.
        store = Toil.resumeJobStore(config.jobStore)
        assert isinstance(store, FileJobStore), "Need a FileJobStore which has a sharedFilesDir"
        pid_path = os.path.join(store.sharedFilesDir, 'pid.log')
        with open(pid_path) as handle:
            leader_pid = handle.read().strip()
        try:
            os.kill(int(leader_pid), signal.SIGTERM)
            logger.info("Toil process %s successfully terminated." % str(leader_pid))
        except OSError:
            logger.error("Toil process %s could not be terminated." % str(leader_pid))
            raise
    else:
        # Remote (aws/google) jobstore; use the old (broken?) method.
        store = Toil.resumeJobStore(config.jobStore)
        logger.info("Starting routine to kill running jobs in the toil workflow: %s", config.jobStore)
        # TODO: This behaviour is now broken: https://github.com/DataBiosphere/toil/commit/a3d65fc8925712221e4cda116d1825d4a1e963a1
        # The batch system in use may not be able to enumerate running jobs
        # belonging to this job store, and the leader's recorded PID is never
        # consulted here even when it is a local process.
        backend = Toil.createBatchSystem(store.config)  # Should automatically kill existing jobs, so we're good.
        for issued_id in backend.getIssuedBatchJobIDs():  # Just in case we do it again.
            backend.killBatchJobs([issued_id])
        logger.info("All jobs SHOULD have been killed")