def __init__(self, *args, **kwargs):
    """Initialize the SSH orchestrator module.

    Reads the cluster fsid from the mon map, loads the ceph-daemon
    helper script from the configured path, starts a single-threaded
    worker pool, and (re)establishes the SSH configuration.

    :raises RuntimeError: if the ceph-daemon script cannot be read.
    """
    super(SSHOrchestrator, self).__init__(*args, **kwargs)
    self._cluster_fsid = self.get('mon_map')['fsid']
    self.config_notify()
    path = self.get_ceph_option('ceph_daemon_path')
    try:
        with open(path, 'r') as f:
            self._ceph_daemon = f.read()
    # TypeError covers `path` being None (option unset); IOError covers
    # a missing or unreadable file.
    except (IOError, TypeError) as e:
        raise RuntimeError("unable to read ceph-daemon at '%s': %s" % (path, str(e)))
    self._worker_pool = multiprocessing.pool.ThreadPool(1)
    self._reconfig_ssh()
    # the keys in inventory_cache are authoritative.
    #   You must not call remove_outdated()
    # The values are cached by instance.
    # cache is invalidated by
    # 1. timeout
    # 2. refresh parameter
    self.inventory_cache = orchestrator.OutdatablePersistentDict(
        self, self._STORE_HOST_PREFIX + '.devices')
    self.daemon_cache = orchestrator.OutdatablePersistentDict(
        self, self._STORE_HOST_PREFIX + '.daemons')
def __init__(self, *args, **kwargs):
    """Initialize the SSH orchestrator module.

    Reads the cluster fsid, loads the ceph-daemon helper script,
    starts the worker pool and SSH configuration, restores the
    persisted host inventory from the module store, and brings the
    per-host device/service caches in sync with that inventory.

    :raises RuntimeError: if the ceph-daemon script cannot be read.
    """
    super(SSHOrchestrator, self).__init__(*args, **kwargs)
    self._cluster_fsid = self.get('mon_map')['fsid']
    self.config_notify()
    path = self.get_ceph_option('ceph_daemon_path')
    try:
        with open(path, 'r') as f:
            self._ceph_daemon = f.read()
    # TypeError covers `path` being None (option unset); IOError covers
    # a missing or unreadable file.
    except (IOError, TypeError) as e:
        raise RuntimeError("unable to read ceph-daemon at '%s': %s" % (
            path, str(e)))
    self._worker_pool = multiprocessing.pool.ThreadPool(1)
    self._reconfig_ssh()
    SSHOrchestrator.instance = self
    self.all_progress_references = list()  # type: List[orchestrator.ProgressReference]
    # load inventory
    i = self.get_store('inventory')
    if i:
        self.inventory = json.loads(i)
    else:
        self.inventory = dict()
    self.log.debug('Loaded inventory %s' % self.inventory)
    # The values are cached by instance.
    # cache is invalidated by
    # 1. timeout
    # 2. refresh parameter
    self.inventory_cache = orchestrator.OutdatablePersistentDict(
        self, self._STORE_HOST_PREFIX + '.devices')
    self.service_cache = orchestrator.OutdatablePersistentDict(
        self, self._STORE_HOST_PREFIX + '.services')
    # ensure the host lists are in sync
    for h in self.inventory.keys():
        if h not in self.inventory_cache:
            self.log.debug('adding inventory item for %s' % h)
            self.inventory_cache[h] = orchestrator.OutdatableData()
        if h not in self.service_cache:
            self.log.debug('adding service item for %s' % h)
            self.service_cache[h] = orchestrator.OutdatableData()
    # Iterate over snapshots: deleting from a mapping while iterating
    # it directly raises RuntimeError on dict-like mappings.
    for h in list(self.inventory_cache):
        if h not in self.inventory:
            del self.inventory_cache[h]
    for h in list(self.service_cache):
        if h not in self.inventory:
            del self.service_cache[h]
def __init__(self, *args, **kwargs):
    """Set up the SSH orchestrator with empty cluster state.

    The cluster fsid starts out unresolved (None), remote operations
    are funneled through a single-threaded worker pool, and the
    per-host inventory is backed by a persistent, outdatable cache.
    """
    super(SSHOrchestrator, self).__init__(*args, **kwargs)
    self._cluster_fsid = None
    # One worker thread keeps remote operations serialized.
    self._worker_pool = multiprocessing.pool.ThreadPool(1)
    # The key set of inventory_cache is authoritative, so
    # remove_outdated() must never be called on it. Cached values
    # are invalidated either by timeout or by an explicit refresh
    # request.
    store_prefix = self._STORE_HOST_PREFIX
    self.inventory_cache = orchestrator.OutdatablePersistentDict(
        self, store_prefix)