def __init__(self):
    """Set up an empty task queue and its synchronization primitives."""
    logging.info("CREATING NEW INSTANCE FOR Queue...")
    # Guards concurrent access to the queue state.
    self.lock = threading_lock()
    # Pending items, registered tasks, worker pool, and the (initially
    # unset) timer.
    self.queue = deque()
    self.tasks = {}
    self.workers = []
    self.timer = None
def __init__(self, KEGG_DATA_DIR=""):
    """Initialize the KEGG information manager with empty caches.

    KEGG_DATA_DIR -- base directory for KEGG data files.
    """
    logging.info("CREATING NEW INSTANCE FOR KeggInformationManager...")
    # Serializes access to the organism history and translation cache.
    self.lock = threading_lock()
    self.lastOrganisms = deque()
    self.translationCache = {}
    # TODO: READ FROM CONF
    # NOTE(review): plain concatenation assumes KEGG_DATA_DIR ends with a
    # path separator — confirm callers always pass a trailing slash.
    self.KEGG_DATA_DIR = KEGG_DATA_DIR + "current/common/"
def __init__(self):
    """Create a queue instance with a unique id and empty task containers."""
    logging.info("Creating the new instance for queue...")
    self.id = self.get_random_id()
    # Lock that protects the queue against concurrent mutation.
    self.lock = threading_lock()
    # Pending entries, task registry, worker list, and optional timer.
    self.queue = deque()
    self.tasks = {}
    self.workers = []
    self.timer = None
def __init__(self,
             config,
             health_check_interval_seconds,
             scheduler=None,
             instance_watcher=None,
             quota_check=None,
             job_monitor=None,
             scheduler_mux=None,
             rpc_completion_timeout=RPC_COMPLETION_TIMEOUT_SECS):
    """Build an updater for the job described by *config*.

    Optional collaborators (scheduler, instance_watcher, quota_check,
    job_monitor, scheduler_mux) may be injected for testing; real
    implementations are constructed when they are omitted.

    Raises self.Error if the update configuration is invalid or uses
    pulse intervals, which this updater does not support.
    """
    self._config = config
    self._job_key = JobKey(role=config.role(),
                           environment=config.environment(),
                           name=config.name())
    self._health_check_interval_seconds = health_check_interval_seconds
    # Fall back to real collaborators when test doubles are not injected.
    self._scheduler = scheduler or SchedulerProxy(config.cluster())
    self._quota_check = quota_check or QuotaCheck(self._scheduler)
    self._scheduler_mux = scheduler_mux or SchedulerMux()
    self._job_monitor = job_monitor or JobMonitor(
        self._scheduler,
        self._config.job_key(),
        scheduler_mux=self._scheduler_mux)
    self._rpc_completion_timeout = rpc_completion_timeout
    # Surface configuration problems as this class's own error type.
    try:
        self._update_config = UpdaterConfig(**config.update_config().get())
    except ValueError as e:
        raise self.Error(str(e))
    if self._update_config.pulse_interval_secs:
        raise self.Error(
            'Pulse interval seconds is not supported by the client updater.')
    self._lock = None
    self._thread_lock = threading_lock()
    self._batch_wait_event = Event()
    self._batch_completion_queue = Queue()
    self.failure_threshold = FailureThreshold(
        self._update_config.max_per_instance_failures,
        self._update_config.max_total_failures)
    self._watcher = instance_watcher or InstanceWatcher(
        self._scheduler,
        self._job_key,
        self._update_config.restart_threshold,
        self._update_config.watch_secs,
        self._health_check_interval_seconds,
        scheduler_mux=self._scheduler_mux)
    self._terminating = False
def __init__(self,
             config,
             health_check_interval_seconds,
             scheduler=None,
             instance_watcher=None,
             quota_check=None,
             job_monitor=None,
             scheduler_mux=None,
             rpc_completion_timeout=RPC_COMPLETION_TIMEOUT_SECS):
    """Construct a client-side updater for the job defined by *config*.

    All collaborator parameters are optional injection points for tests;
    production instances are created when they are left as None.

    Raises self.Error for invalid update configuration, or when a pulse
    interval is configured (unsupported here).
    """
    self._config = config
    self._job_key = JobKey(role=config.role(),
                           environment=config.environment(),
                           name=config.name())
    self._health_check_interval_seconds = health_check_interval_seconds
    # Real collaborators are built only when no doubles were supplied.
    self._scheduler = scheduler or SchedulerProxy(config.cluster())
    self._quota_check = quota_check or QuotaCheck(self._scheduler)
    self._scheduler_mux = scheduler_mux or SchedulerMux()
    self._job_monitor = job_monitor or JobMonitor(
        self._scheduler,
        self._config.job_key(),
        scheduler_mux=self._scheduler_mux)
    self._rpc_completion_timeout = rpc_completion_timeout
    # Translate bad update configuration into this class's error type.
    try:
        self._update_config = UpdaterConfig(**config.update_config().get())
    except ValueError as e:
        raise self.Error(str(e))
    if self._update_config.pulse_interval_secs:
        raise self.Error(
            'Pulse interval seconds is not supported by the client updater.')
    self._lock = None
    self._thread_lock = threading_lock()
    self._batch_wait_event = Event()
    self._batch_completion_queue = Queue()
    self.failure_threshold = FailureThreshold(
        self._update_config.max_per_instance_failures,
        self._update_config.max_total_failures)
    self._watcher = instance_watcher or InstanceWatcher(
        self._scheduler,
        self._job_key,
        self._update_config.restart_threshold,
        self._update_config.watch_secs,
        self._health_check_interval_seconds,
        scheduler_mux=self._scheduler_mux)
    self._terminating = False