def __init__(self, app, dispatcher):
    """Start the job manager.

    Sets up job-limit bookkeeping and an in-memory queue (used only when
    jobs are not tracked in the database), then prepares — but does not
    start — the daemon monitor thread.

    :param app: the Galaxy application instance (provides model context
        and configuration).
    :param dispatcher: object that dispatches ready jobs to runners.
    """
    self.app = app
    self.dispatcher = dispatcher
    self.sa_session = app.model.context
    self.track_jobs_in_database = self.app.config.track_jobs_in_database

    # Initialize structures for handling job limits
    self.__clear_user_job_count()

    # Keep track of the pid that started the job manager, only it
    # has valid threads
    self.parent_pid = os.getpid()
    # Contains new jobs. Note this is not used if track_jobs_in_database is True
    self.queue = Queue()
    # Contains jobs that are waiting (only use from monitor thread)
    self.waiting_jobs = []
    # Contains wrappers of jobs that are limited or ready (so they aren't
    # created unnecessarily/multiple times)
    self.job_wrappers = {}
    # Helper for interruptable sleep
    self.sleeper = Sleeper()
    self.running = True
    self.monitor_thread = threading.Thread(
        name="JobHandlerQueue.monitor_thread",
        target=self.__monitor)
    # FIX: setDaemon() is a deprecated alias (since Python 3.10);
    # assign the daemon attribute directly (available since 2.6).
    self.monitor_thread.daemon = True
def __init__(self, app, job_handler):
    """Construct the manager-side job queue, recover any jobs left over
    from a previous run, and start the monitor thread.

    :param app: the Galaxy application instance.
    :param job_handler: the (singular) handler if we are passing jobs
        in memory.
    """
    self.app = app
    self.job_handler = job_handler
    self.sa_session = app.model.context
    self.job_lock = False

    # Only the pid that created the manager owns valid threads.
    self.parent_pid = os.getpid()
    # In-memory job queue; unused when track_jobs_in_database is True.
    self.queue = Queue()
    # Interruptable-sleep helper for the monitor loop.
    self.sleeper = Sleeper()
    self.running = True
    self.monitor_thread = threading.Thread(target=self.__monitor)

    # Recover jobs at startup, then begin monitoring.
    self.__check_jobs_at_startup()
    self.monitor_thread.start()
    log.info("job manager queue started")
def __init__(self, app, dispatcher):
    """Start the job manager.

    Prepares the in-memory queue (used only when jobs are not tracked in
    the database) and the daemon monitor thread; the thread is not
    started here.

    :param app: the Galaxy application instance (provides model context
        and configuration).
    :param dispatcher: object that dispatches ready jobs to runners.
    """
    self.app = app
    self.dispatcher = dispatcher
    self.sa_session = app.model.context
    self.track_jobs_in_database = self.app.config.track_jobs_in_database

    # Keep track of the pid that started the job manager, only it
    # has valid threads
    self.parent_pid = os.getpid()
    # Contains new jobs. Note this is not used if track_jobs_in_database is True
    self.queue = Queue()
    # Contains jobs that are waiting (only use from monitor thread)
    self.waiting_jobs = []
    # Helper for interruptable sleep
    self.sleeper = Sleeper()
    self.running = True
    self.monitor_thread = threading.Thread(
        name="JobHandlerQueue.monitor_thread",
        target=self.__monitor)
    # FIX: setDaemon() is a deprecated alias (since Python 3.10);
    # assign the daemon attribute directly (available since 2.6).
    self.monitor_thread.daemon = True
def __init__(self, app, job_handler):
    """Construct the manager-side stop queue and start its monitor thread.

    :param app: the Galaxy application instance.
    :param job_handler: handler used when jobs are passed in memory.
    """
    self.app = app
    self.job_handler = job_handler
    self.sa_session = app.model.context

    # Only the pid that created the manager owns valid threads.
    self.parent_pid = os.getpid()
    # In-memory queue of stop requests; unused when track_jobs_in_database
    # is True.
    self.queue = Queue()
    # Jobs awaiting processing; touched only by the monitor thread.
    self.waiting = []
    # Interruptable-sleep helper for the monitor loop.
    self.sleeper = Sleeper()
    self.running = True
    self.monitor_thread = threading.Thread(target=self.monitor)
    self.monitor_thread.start()
    log.info("job manager stop queue started")
def __init__(self, config, fsmon=False):
    """Initialize a distributed object store from its XML config file.

    Parses the per-backend weights and free-space limits, then (when
    ``fsmon`` is set and any limit is configured) starts a daemon thread
    that monitors filesystem fullness.

    :param config: application config; must provide
        ``distributed_object_store_config_file``.
    :param fsmon: when True, enable the filesystem space monitor if any
        percent-full limit is configured.
    """
    super(DistributedObjectStore, self).__init__()

    self.distributed_config = config.distributed_object_store_config_file
    # FIX: removed a stray ')' that was inside the original error message.
    assert self.distributed_config is not None, \
        "distributed object store ('object_store = distributed') " \
        "requires a config file, please set one in " \
        "'distributed_object_store_config_file'"
    self.backends = {}
    self.weighted_backend_ids = []
    self.original_weighted_backend_ids = []
    self.max_percent_full = {}
    self.global_max_percent_full = 0.0
    random.seed()

    self.__parse_distributed_config(config)

    self.sleeper = None
    # FIX: the original tested `filter(lambda x: x != 0.0, ...)` — under
    # Python 3 filter() returns an iterator, which is ALWAYS truthy, so the
    # monitor would start even with no limits configured. any() expresses
    # the intended "is any per-backend limit non-zero?" check correctly.
    if fsmon and (self.global_max_percent_full or
                  any(v != 0.0 for v in self.max_percent_full.values())):
        self.sleeper = Sleeper()
        self.filesystem_monitor_thread = threading.Thread(target=self.__filesystem_monitor)
        # setDaemon() is deprecated; set the attribute directly.
        self.filesystem_monitor_thread.daemon = True
        self.filesystem_monitor_thread.start()
        log.info("Filesystem space monitor started")
def __init__(self, config):
    """Set up the S3-backed object store: open the connection, prepare the
    local cache, start the cache-cleaning thread, and probe for 'axel'.

    :param config: application config providing S3 credentials, bucket
        name, file path, and cache size.
    """
    super(S3ObjectStore, self).__init__()
    self.config = config
    self.staging_path = self.config.file_path
    self.s3_conn = get_OS_connection(self.config)
    self.bucket = self._get_bucket(self.config.os_bucket_name)
    self.use_rr = self.config.os_use_reduced_redundancy
    self.cache_size = self.config.object_store_cache_size
    self.transfer_progress = 0

    # A cache size of -1 means "unlimited": clean the cache only when a
    # real value was set in universe_wsgi.ini. The configured value is in
    # GB; convert to bytes so it can be compared against file sizes.
    if self.cache_size != -1:
        self.cache_size *= 1073741824

    # Interruptable-sleep helper plus the cache-cleaning thread.
    self.sleeper = Sleeper()
    self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
    self.cache_monitor_thread.start()
    log.info("Cache cleaner manager started")

    # Probe whether 'axel' is available for parallel downloads into the
    # cache; an OSError means the binary is not installed.
    try:
        subprocess.call('axel')
        self.use_axel = True
    except OSError:
        self.use_axel = False