Example #1
    def add_host(self, host):
        """
        Add a host to be managed by the orchestrator.

        :param host: host name
        """
        self.inventory[host] = {}
        self._save_inventory()
        self.inventory_cache[host] = orchestrator.OutdatableData()
        self.service_cache[host] = orchestrator.OutdatableData()
        return "Added host '{}'".format(host)
Example #2
    def __init__(self, *args, **kwargs):
        super(SSHOrchestrator, self).__init__(*args, **kwargs)
        self._cluster_fsid = self.get('mon_map')['fsid']

        self.config_notify()

        path = self.get_ceph_option('ceph_daemon_path')
        try:
            with open(path, 'r') as f:
                self._ceph_daemon = f.read()
        except (IOError, TypeError) as e:
            raise RuntimeError("unable to read ceph-daemon at '%s': %s" % (
                path, str(e)))

        self._worker_pool = multiprocessing.pool.ThreadPool(1)

        self._reconfig_ssh()

        SSHOrchestrator.instance = self
        self.all_progress_references = list()  # type: List[orchestrator.ProgressReference]

        # load inventory
        i = self.get_store('inventory')
        if i:
            self.inventory = json.loads(i)
        else:
            self.inventory = dict()
        self.log.debug('Loaded inventory %s' % self.inventory)

        # The values are cached by instance.
        # cache is invalidated by
        # 1. timeout
        # 2. refresh parameter
        self.inventory_cache = orchestrator.OutdatablePersistentDict(
            self, self._STORE_HOST_PREFIX + '.devices')

        self.service_cache = orchestrator.OutdatablePersistentDict(
            self, self._STORE_HOST_PREFIX + '.services')

        # ensure the host lists are in sync
        for h in self.inventory.keys():
            if h not in self.inventory_cache:
                self.log.debug('adding inventory item for %s' % h)
                self.inventory_cache[h] = orchestrator.OutdatableData()
            if h not in self.service_cache:
                self.log.debug('adding service item for %s' % h)
                self.service_cache[h] = orchestrator.OutdatableData()
        # drop cache entries for hosts that are no longer in the inventory
        # (iterate over copies, since entries are deleted as we go)
        for h in list(self.inventory_cache):
            if h not in self.inventory:
                del self.inventory_cache[h]
        for h in list(self.service_cache):
            if h not in self.inventory:
                del self.service_cache[h]
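Note: the comments above describe the two invalidation triggers for the per-host caches (a timeout and an explicit refresh). The snippet below is only a rough, hypothetical sketch of that idea using an invented TimestampedEntry class; it is not the actual orchestrator.OutdatableData implementation.

import datetime

class TimestampedEntry(object):
    """Hypothetical cache entry illustrating timeout-based invalidation."""

    def __init__(self, data=None):
        self.data = data if data is not None else {}
        self.last_refresh = datetime.datetime.utcnow()

    def outdated(self, timeout_min):
        # Stale once more than timeout_min minutes have passed since the last
        # refresh; callers combine this with an explicit refresh flag, e.g.
        # `if entry.outdated(timeout_min) or refresh: ...`
        age = datetime.datetime.utcnow() - self.last_refresh
        return age > datetime.timedelta(minutes=timeout_min)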
Example #3
File: module.py  Project: micmejia/ceph
        def run(host, host_info):
            # type: (str, orchestrator.OutdatableData) -> orchestrator.InventoryNode

            timeout_min = int(
                self.get_module_option(
                    "inventory_cache_timeout_min",
                    self._DEFAULT_INVENTORY_CACHE_TIMEOUT_MIN))

            if host_info.outdated(timeout_min) or refresh:
                self.log.info("refresh stale inventory for '{}'".format(host))
                out, code = self._run_ceph_daemon(
                    host, 'osd', 'ceph-volume',
                    ['--', 'inventory', '--format=json'])
                # stdout and stderr get combined; assume last line is the real
                # output and everything preceding it is an error.
                data = json.loads(out[-1])
                host_info = orchestrator.OutdatableData(data)
                self.inventory_cache[host] = host_info
            else:
                self.log.debug(
                    "reading cached inventory for '{}'".format(host))

            devices = orchestrator.InventoryDevice.from_ceph_volume_inventory_list(
                host_info.data)
            return orchestrator.InventoryNode(host, devices)
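A note on the json.loads(out[-1]) call above: because stdout and stderr are combined, only the last line is treated as the JSON payload. A minimal, self-contained sketch of that parsing convention (the helper name is made up for illustration):

import json

def parse_last_json_line(lines):
    # stdout and stderr arrive interleaved; by convention the JSON payload
    # is the final line and everything before it is diagnostic output.
    if not lines:
        raise ValueError("no output to parse")
    return json.loads(lines[-1])

# e.g. parse_last_json_line(['some warning on stderr', '[{"path": "/dev/sdb"}]'])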
Example #4
        def process_result(event_data):
            result = []
            if event_data['success']:
                for service_node, service_info in event_data["return"].items():
                    node_service_cache = []
                    for this_service_type, service_dict in service_info.items():
                        if isinstance(service_dict, str):
                            # map old form where deepsea only returned service IDs
                            # to new form where it returns a dict
                            service_dict = { 'service_instance': service_dict }
                        desc = orchestrator.ServiceDescription(nodename=service_node,
                                                               service_instance=service_dict['service_instance'],
                                                               service_type=_deepsea_to_ceph(this_service_type),
                                                               # the following may or may not be present
                                                               container_id=service_dict.get('container_id', None),
                                                               service=service_dict.get('service', None),
                                                               version=service_dict.get('version', None),
                                                               rados_config_location=service_dict.get('rados_config_location', None),
                                                               service_url=service_dict.get('service_url', None),
                                                               status=service_dict.get('status', None),
                                                               status_desc=service_dict.get('status_desc', None)
                                                               )
                        # Always add every service to the cache...
                        node_service_cache.append(desc.to_json())
                        # ...but only return the ones the caller asked for
                        if ((service_type is None or desc.service_type == service_type) and
                            (service_id is None or desc.service_instance == service_id) and
                            (node_name is None or desc.nodename == node_name)):
                            result.append(desc)

                    self.service_cache[service_node] = orchestrator.OutdatableData(node_service_cache)
            else:
                self.log.error(event_data['return'])
            return result
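The two comments above capture the pattern in this handler: every service description goes into the per-node cache, but only the entries matching the caller's filters are returned. A stripped-down, hypothetical illustration of that cache-everything / filter-on-return idea (names are invented):

def collect_services(all_services, wanted_type=None):
    # Hypothetical sketch: cache everything, return only what was asked for.
    cache = []
    result = []
    for svc in all_services:
        cache.append(svc)                      # always cached
        if wanted_type is None or svc.get('type') == wanted_type:
            result.append(svc)                 # only matching entries returned
    return cache, result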
Example #5
    def _refresh_host_services(self, host):
        out, code = self._run_ceph_daemon(
            host, 'mon', 'ls', [], no_fsid=True)
        data = json.loads(''.join(out))
        self.log.error('refreshed host %s services: %s' % (host, data))
        self.service_cache[host] = orchestrator.OutdatableData(data)
        return data
Example #6
        def process_result(event_data):
            result = []
            if event_data['success']:
                for node_name, node_devs in event_data["return"].items():
                    if node_filter is None:
                        self.inventory_cache[node_name] = orchestrator.OutdatableData(node_devs)
                    devs = orchestrator.InventoryDevice.from_ceph_volume_inventory_list(node_devs)
                    result.append(orchestrator.InventoryNode(node_name, devs))
            else:
                self.log.error(event_data['return'])
            return result
Example #7
File: module.py  Project: lianghongq/ceph
    def __init__(self, *args, **kwargs):
        super(SSHOrchestrator, self).__init__(*args, **kwargs)
        self._cluster_fsid = self.get('mon_map')['fsid']

        self.config_notify()

        path = self.get_ceph_option('ceph_daemon_path')
        try:
            with open(path, 'r') as f:
                self._ceph_daemon = f.read()
        except IOError as e:
            raise RuntimeError("unable to read ceph-daemon at '%s': %s" %
                               (path, str(e)))

        self._worker_pool = multiprocessing.pool.ThreadPool(1)

        self._reconfig_ssh()

        # the keys in inventory_cache are authoritative.
        #   You must not call remove_outdated()
        # The values are cached by instance.
        # cache is invalidated by
        # 1. timeout
        # 2. refresh parameter
        self.inventory_cache = orchestrator.OutdatablePersistentDict(
            self, self._STORE_HOST_PREFIX + '.devices')

        self.service_cache = orchestrator.OutdatablePersistentDict(
            self, self._STORE_HOST_PREFIX + '.services')

        # ensure the host lists are in sync
        for h in set(self.inventory_cache.keys()) | set(
                self.service_cache.keys()):
            if h not in self.inventory_cache:
                self.log.debug('adding inventory item for %s' % h)
                self.inventory_cache[h] = orchestrator.OutdatableData()
            if h not in self.service_cache:
                self.log.debug('adding service item for %s' % h)
                self.service_cache[h] = orchestrator.OutdatableData()
Example #8
        def process_result(event_data):
            result = []
            if event_data['success']:
                for node_name, service_info in event_data["return"].items():
                    node_service_cache = []
                    for service_type, service_instance in service_info.items():
                        desc = orchestrator.ServiceDescription(nodename=node_name,
                                                               service_instance=service_instance,
                                                               service_type=service_type)
                        result.append(desc)
                        node_service_cache.append(desc.to_json())
                    self.service_cache[node_name] = orchestrator.OutdatableData(node_service_cache)
            else:
                self.log.error(event_data['return'])
            return result
Example #9
File: module.py  Project: smdsbz/ceph
        def run(host, host_info):
            # type: (str, orchestrator.OutdatableData) -> orchestrator.InventoryNode

            if host_info.outdated(self.inventory_cache_timeout) or refresh:
                self.log.info("refresh stale inventory for '{}'".format(host))
                out, code = self._run_ceph_daemon(
                    host, 'osd', 'ceph-volume',
                    ['--', 'inventory', '--format=json'])
                data = json.loads(''.join(out))
                host_info = orchestrator.OutdatableData(data)
                self.inventory_cache[host] = host_info
            else:
                self.log.debug(
                    "reading cached inventory for '{}'".format(host))

            devices = inventory.Devices.from_json(host_info.data)
            return orchestrator.InventoryNode(host, devices)
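Worth noting in this variant: the cache keeps the raw JSON-style data (host_info.data), and the typed Devices objects are only rebuilt at read time via inventory.Devices.from_json(). A simplified, hypothetical illustration of that store-raw / rehydrate-on-read pattern (DeviceRecord is an invented class, not part of Ceph):

import json

class DeviceRecord(object):
    """Invented example of a typed view over raw, JSON-serialisable data."""

    def __init__(self, path, rotational):
        self.path = path
        self.rotational = rotational

    @classmethod
    def from_json(cls, raw):
        # Rebuild the typed object from the plain dict kept in the cache;
        # the cache itself only ever stores JSON-serialisable data.
        return cls(raw.get('path'), raw.get('rotational'))

cached = json.loads('{"path": "/dev/sdb", "rotational": true}')
record = DeviceRecord.from_json(cached)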
Example #10
        def process_result(event_data):
            result = []
            if event_data['success']:
                for node_name, node_devs in event_data["return"].items():
                    if node_filter is None:
                        # The cache will only be populated when this function is invoked
                        # without a node filter, i.e. if you run it once for the whole
                        # cluster, you can then call it for individual nodes and return
                        # cached data.  However, if you only *ever* call it for individual
                        # nodes, the cache will never be populated, and you'll always have
                        # the full round trip to DeepSea.
                        self.inventory_cache[node_name] = orchestrator.OutdatableData(node_devs)
                    devs = orchestrator.InventoryDevice.from_ceph_volume_inventory_list(node_devs)
                    result.append(orchestrator.InventoryNode(node_name, devs))
            else:
                self.log.error(event_data['return'])
            return result
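The comment above spells out the cache-warming contract: only a call without a node filter populates the cache. A small, hypothetical helper showing the same warm-then-read flow (fetch_all and the plain dict cache are stand-ins, not DeepSea APIs):

def get_node_devices(cache, fetch_all, node_name=None):
    # Hypothetical helper mirroring the comment above: a cluster-wide call
    # (node_name is None) does the full round trip and warms the cache,
    # while a per-node call is served from whatever is already cached.
    if node_name is None:
        cache.clear()
        cache.update(fetch_all())
        return dict(cache)
    return {node_name: cache.get(node_name)}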
Example #11
        def run(host, host_info):
            # type: (str, orchestrator.OutdatableData) -> orchestrator.InventoryNode

            timeout_min = int(
                self.get_module_option(
                    "inventory_cache_timeout_min",
                    self._DEFAULT_INVENTORY_CACHE_TIMEOUT_MIN))

            if host_info.outdated(timeout_min) or refresh:
                self.log.info("refresh stale inventory for '{}'".format(host))
                data = self._get_device_inventory(host)
                host_info = orchestrator.OutdatableData(data)
                self.inventory_cache[host] = host_info
            else:
                self.log.debug(
                    "reading cached inventory for '{}'".format(host))

            devices = orchestrator.InventoryDevice.from_ceph_volume_inventory_list(
                host_info.data)
            return orchestrator.InventoryNode(host, devices)
Example #12
        def run(host, host_info):
            # type: (str, orchestrator.OutdatableData) -> orchestrator.InventoryNode

            timeout_min = int(
                self.get_module_option(
                    "inventory_cache_timeout_min",
                    self._DEFAULT_INVENTORY_CACHE_TIMEOUT_MIN))

            if host_info.outdated(timeout_min) or refresh:
                self.log.info("refresh stale inventory for '{}'".format(host))
                out, code = self._run_ceph_daemon(
                    host, 'osd', 'ceph-volume',
                    ['--', 'inventory', '--format=json'])
                data = json.loads(''.join(out))
                host_info = orchestrator.OutdatableData(data)
                self.inventory_cache[host] = host_info
            else:
                self.log.debug(
                    "reading cached inventory for '{}'".format(host))

            devices = orchestrator.InventoryDevice.from_ceph_volume_inventory_list(
                host_info.data)
            return orchestrator.InventoryNode(host, devices)
Example #13
File: module.py  Project: lianghongq/ceph
        def run(host):
            self.inventory_cache[host] = orchestrator.OutdatableData()
            self.service_cache[host] = orchestrator.OutdatableData()
            return "Added host '{}'".format(host)