def load_and_execute_sds_discovery_plugins(self):
    """Run SDS (software-defined storage) discovery plugins on this node.

    Each available plugin probes for an installed storage system; the
    first non-empty result is persisted to etcd as a NodeContext tagged
    with the detected package name/version and cluster attributes.
    Returns early if the discovery manager cannot be initialized.
    """
    LOG.info("load_and_execute_sds_discovery_plugins")
    try:
        sds_discovery_manager = SDSDiscoveryManager()
    except ValueError as ex:
        # Fixed: message previously contained a stray backslash
        # ("\Error") left over from a broken line continuation.
        LOG.error('Failed to init SDSDiscoveryManager. Error %s' % str(ex))
        return

    # Execute the SDS discovery plugins and tag the nodes with data
    for plugin in sds_discovery_manager.get_available_plugins():
        sds_details = plugin.discover_storage_system()
        if not sds_details:
            # Plugin found nothing on this node; try the next one.
            continue
        # Shallow-copy the attributes (renamed from `dict`, which
        # shadowed the builtin).
        cluster_attrs = dict(sds_details['cluster_attrs'])
        try:
            self.persister_thread.update_node_context(
                NodeContext(
                    updated=str(time.time()),
                    node_id=utils.get_local_node_context(),
                    sds_pkg_name=sds_details['pkg_name'],
                    sds_pkg_version=sds_details['pkg_version'],
                    detected_cluster_id=sds_details['detected_cluster_id'],
                    cluster_attrs=cluster_attrs
                )
            )
        except etcd.EtcdException as ex:
            # Best-effort persistence: log and stop trying further
            # plugins once etcd is unreachable.
            LOG.error('Failed to update etcd . Error %s' % str(ex))
            break
def load_and_execute_platform_discovery_plugins(self):
    """Run platform discovery plugins and persist OS details to etcd.

    Each available plugin reports the platform (OS name/version, kernel
    version); the first non-empty result is written to etcd as a
    Platform object. Returns early if the PlatformManager cannot be
    initialized.
    """
    # Fixed: log string previously contained an embedded backslash
    # continuation artifact.
    LOG.info("load_and_execute_platform_discovery_plugins, platform plugins")
    try:
        platform_manager = PlatformManager()
    except ValueError as ex:
        # Fixed: "\Error" artifact removed from the message.
        LOG.error('Failed to init PlatformManager. Error %s' % str(ex))
        return

    # Execute the platform plugins
    for plugin in platform_manager.get_available_plugins():
        platform_details = plugin.discover_platform()
        if not platform_details:
            continue
        # Update etcd with the discovered platform information
        try:
            self.persister_thread.update_platform(
                Platform(
                    updated=str(time.time()),
                    os=platform_details["Name"],
                    os_version=platform_details["OSVersion"],
                    kernel_version=platform_details["KernelVersion"],
                    node_id=utils.get_local_node_context(),
                )
            )
        except etcd.EtcdException as ex:
            # Stop after the first etcd failure; further writes would
            # fail the same way.
            LOG.error('Failed to update etcd . Error %s' % str(ex))
            break
def __init__(self, machine_id):
    """Initialize the node agent manager.

    Builds the etcd client from config, reconciles the locally cached
    node context against etcd (dropping the cache when etcd has no
    record of it), starts the base manager with the sync-state and
    persister threads, registers the node, and finally runs the
    platform and SDS discovery plugins.
    """
    self._complete = gevent.event.Event()
    # Connection parameters for the etcd cluster come from config.
    connection_args = {
        'port': int(config.get("commons", "etcd_port")),
        'host': config.get("commons", "etcd_connection")
    }
    self.etcd_client = etcd.Client(**connection_args)

    # If a local node context exists but etcd no longer knows about it,
    # discard the stale local copy so a fresh one gets created.
    cached_context = utils.set_local_node_context()
    if cached_context and \
            utils.get_node_context(self.etcd_client, cached_context) is None:
        utils.delete_local_node_context()

    node_id = utils.get_local_node_context()
    super(NodeAgentManager, self).__init__(
        "node",
        node_id,
        config,
        NodeAgentSyncStateThread(self),
        NodeAgentEtcdPersister(config),
        "/tendrl_definitions_node_agent/data",
        node_id=node_id
    )
    self.register_node(machine_id)
    self.load_and_execute_platform_discovery_plugins()
    self.load_and_execute_sds_discovery_plugins()
def register_node(self, machine_id):
    """Register this node in etcd.

    Updates the node context with the machine id, then persists the
    tendrl context (SDS name/version), the node record (fqdn, status
    UP), and the tendrl definitions.
    """
    update_node_context(self, machine_id)

    # Pull the current SDS context from the hardware inventory and
    # persist it.
    sds_context = pull_hardware_inventory.getTendrlContext()
    self.persister_thread.update_tendrl_context(
        TendrlContext(
            updated=str(time.time()),
            sds_version=sds_context['sds_version'],
            node_id=utils.get_local_node_context(),
            sds_name=sds_context['sds_name'],
        )
    )

    # Mark this node as present and reachable.
    self.persister_thread.update_node(
        Node(
            node_id=utils.get_local_node_context(),
            fqdn=socket.getfqdn(),
            status="UP"
        )
    )

    # Push the flow/object definitions this agent operates with.
    self.persister_thread.update_tendrl_definitions(
        TendrlDefinitions(updated=str(time.time()), data=def_data)
    )
def get_node_inventory():
    """Collect this node's inventory as a dict.

    Gathers the machine id (from /etc/machine-id), the local node id,
    and the OS, CPU, memory and tendrl-context details reported by the
    respective helpers.
    """
    # Read the systemd machine id for this host.
    machine_id_cmd = Command({"_raw_params": "cat /etc/machine-id"})
    out, err = machine_id_cmd.start()

    # Dict-literal values are evaluated top-to-bottom, preserving the
    # original call order of the helpers.
    return {
        "machine_id": out['stdout'],
        "node_id": mgr_utils.get_local_node_context(),
        "os": getNodeOs(),
        "cpu": getNodeCpu(),
        "memory": getNodeMemory(),
        "tendrl_context": getTendrlContext(),
    }
def run(self):
    """Fan the ImportCluster flow out to peer nodes, then run it locally.

    When this node holds the full node list (master for the flow), it
    queues an identical single-node ImportCluster job in etcd for every
    other node. If this node itself is in the list, it prepares the
    local flow parameters (package info, integration command, cluster
    id in etcd) before delegating to the parent flow.
    """
    this_node = manager_utils.get_local_node_context()
    cluster_id = self.parameters['TendrlContext.integration_id']
    nodes = self.parameters['Node[]']

    if len(nodes) > 1:
        # Master node: create the same flow for each node in the list
        # except this one.
        for peer in nodes:
            if this_node == peer:
                continue
            peer_params = self.parameters.copy()
            peer_params['Node[]'] = [peer]
            job = {
                "integration_id": cluster_id,
                "node_id": peer,
                "run": "tendrl.node_agent.flows.ImportCluster",
                "status": "new",
                "parameters": peer_params,
                "parent": self.parameters['request_id'],
                "type": "node"
            }
            # Strip runtime objects that cannot be JSON-serialized.
            for runtime_key in ("etcd_orm", "manager", "config"):
                job['parameters'].pop(runtime_key, None)
            self.etcd_orm.client.write("/queue/%s" % uuid.uuid4(),
                                       json.dumps(job))

    if this_node in nodes:
        # This node participates in the import: fill in the local
        # parameters needed by the downstream atoms.
        self.parameters['fqdn'] = socket.getfqdn()
        source_type = self.config.get(
            "node-agent", "installation_source_type")
        self.parameters['Package.pkg_type'] = source_type
        self.parameters['Package.name'] = get_package_name(source_type)
        self.parameters['Node.cmd_str'] = \
            "tendrl-ceph-integration --cluster-id %s" % cluster_id
        context_key = "nodes/%s/TendrlContext/cluster_id" % this_node
        self.etcd_orm.client.write(context_key, cluster_id)

    return super(ImportCluster, self).run()