def __init__(self, username=None, password=None):
    TortugaWsApi.__init__(self, username, password)

    cm = ConfigManager()

    self.serverHostname = cm.getInstaller()
    self.serverPort = cm.getAdminPort()
    self.serverScheme = cm.getAdminScheme()
class TortugaWsApi(RestApiClient):
    """
    Base tortuga ws api class.

    """
    def __init__(self, username: Optional[str] = None,
                 password: Optional[str] = None,
                 baseurl: Optional[str] = None,
                 verify: bool = True):
        self._cm = ConfigManager()

        if not baseurl:
            baseurl = '{}://{}:{}'.format(
                self._cm.getAdminScheme(),
                self._cm.getInstaller(),
                self._cm.getAdminPort()
            )

        if username is None and password is None:
            logger.debug('Using built-in user credentials')
            username = self._cm.getCfmUser()
            password = self._cm.getCfmPassword()

        super().__init__(username, password, baseurl, verify)

        self.baseurl = '{}/{}'.format(self.baseurl, WS_API_VERSION)

    def process_response(self, response: requests.Response):
        check_status(response.headers)

        return super().process_response(response)
class TortugaWsApiClient:
    """
    Tortuga ws api client class.

    """
    def __init__(self, endpoint: str,
                 username: Optional[str] = None,
                 password: Optional[str] = None,
                 base_url: Optional[str] = None,
                 verify: bool = True) -> None:
        self._cm = ConfigManager()

        if not base_url:
            base_url = '{}://{}:{}'.format(
                self._cm.getAdminScheme(),
                self._cm.getInstaller(),
                self._cm.getAdminPort()
            )

        if username is None and password is None:
            logger.debug('Using built-in user credentials')
            username = self._cm.getCfmUser()
            password = self._cm.getCfmPassword()

        self._client = RestApiClient(
            username=username,
            password=password,
            baseurl=base_url,
            verify=verify
        )
        self._client.baseurl = '{}/{}/{}/'.format(base_url, WS_API_VERSION,
                                                  endpoint)

    def _build_query_string(self, params: dict) -> str:  # pylint: disable=no-self-use
        return '&'.join([f'{k}={v}' for k, v in params.items()])

    def list(self, **params) -> list:
        path = '/'

        query_string = self._build_query_string(params)
        if query_string:
            path += '?{}'.format(query_string)

        return self._client.get(path)

    def get(self, id_: str) -> dict:
        path = '/{}'.format(id_)

        return self._client.get(path)
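A minimal usage sketch of the client above; the endpoint name and filter value are illustrative assumptions, not part of the API shown:

# Hypothetical usage -- 'nodes' endpoint and 'state' filter are invented.
# When base_url is omitted, the installer host/port come from ConfigManager.
client = TortugaWsApiClient(endpoint='nodes')

# GET <base_url>/<WS_API_VERSION>/nodes/?state=Installed
# (query string assembled by _build_query_string())
for item in client.list(state='Installed'):
    print(item)

# GET <base_url>/<WS_API_VERSION>/nodes/42
print(client.get('42'))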
def pip_install_requirements(requirements_path):
    """
    Installs packages specified in a requirements.txt file, using the
    tortuga package repo in addition to the standard python repos. This
    function returns nothing, and does nothing if the requirements.txt
    file is not found.

    :param requirements_path: the path to the requirements.txt file

    """
    cm = ConfigManager()

    if not os.path.exists(requirements_path):
        logger.debug('Requirements not found: {}'.format(requirements_path))

        return

    if is_requirements_empty(requirements_path):
        logger.debug('Requirements empty: {}'.format(requirements_path))

        return

    pip_cmd = [
        '{}/pip'.format(cm.getBinDir()),
        'install',
    ]

    installer = cm.getInstaller()
    int_webroot = cm.getIntWebRootUrl(installer)
    installer_repo = '{}/python-tortuga/simple/'.format(int_webroot)

    if cm.is_offline_installation():
        # add tortuga distribution repo
        pip_cmd.append('--index-url')
        pip_cmd.append(installer_repo)

        # add offline dependencies repo
        pip_cmd.append('--extra-index-url')
        pip_cmd.append('{}/offline-deps/python/simple/'.format(int_webroot))
    else:
        pip_cmd.append('--extra-index-url')
        pip_cmd.append(installer_repo)

    pip_cmd.extend(['--trusted-host', installer, '-r', requirements_path])

    logger.debug(' '.join(pip_cmd))

    # capture stderr so a failure can be reported; without a PIPE,
    # proc.stderr would be None and the raised message would be empty
    proc = subprocess.Popen(pip_cmd, stderr=subprocess.PIPE)
    _, stderr = proc.communicate()

    if proc.returncode:
        raise Exception(stderr.decode())
def pip_install_requirements(requirements_path):
    """
    Installs packages specified in a requirements.txt file, using the
    kit package repo in addition to the standard python repos. This
    function returns nothing, and does nothing if the requirements.txt
    file is not found.

    :param requirements_path: the path to the requirements.txt file

    """
    #
    # In the kit directory:
    #
    #     /opt/tortuga/kits/kit-x.y.z/tortuga_kits/kit_x_y_z
    #
    # if there is a python_packages directory, with a simple subdirectory
    # in it, it is assumed that the simple subdirectory is a PEP 503
    # compliant Python package repository. If found, this directory is
    # added to the list of directories searched for Python packages via
    # pip when installing the requirements.txt file.
    #
    # These directories can easily be created using the py2pi utility.
    #
    cm = ConfigManager()

    if not os.path.exists(requirements_path):
        logger.debug('Requirements not found: {}'.format(requirements_path))

        return

    if is_requirements_empty(requirements_path):
        logger.debug('Requirements empty: {}'.format(requirements_path))

        return

    installer = cm.getInstaller()
    int_webroot = cm.getIntWebRootUrl(installer)
    installer_repo = '{}/python-tortuga/simple/'.format(int_webroot)

    pip_cmd = [
        '{}/pip'.format(cm.getBinDir()),
        'install',
        '--extra-index-url', installer_repo,
        '--trusted-host', installer,
        '-r', requirements_path
    ]

    logger.debug(' '.join(pip_cmd))

    subprocess.Popen(pip_cmd).wait()
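A hedged sketch of how either variant of this helper is invoked; the kit path below is purely illustrative:

# Illustrative only: the kit requirements path is an invented example.
requirements = '/opt/tortuga/kits/kit-sample-1.0.0/requirements.txt'

# No-op if the file is missing or contains no requirement lines; otherwise
# shells out to the Tortuga-provided pip with the installer's
# python-tortuga repo as an extra package index.
pip_install_requirements(requirements)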
def execute(self, args: argparse.Namespace):
    """
    Listen for events on the Tortuga websocket and print them to
    stdout.

    """
    cm = ConfigManager()

    url, username, password, verify = get_web_service_config(args)

    #
    # If we get a URL from the environment or CLI, we need to transform
    # it from the installer REST API URL into a websocket URL
    #
    if url:
        #
        # Replace http[s] with ws[s]
        #
        url = url.replace('http', 'ws')

        #
        # Replace port with websocket port
        #
        url_parts = url.split(':')
        url = '{}:{}:{}'.format(url_parts[0], url_parts[1],
                                cm.getWebsocketPort())

    #
    # Otherwise, use the default URL from the config manager
    #
    else:
        url = '{}://{}:{}'.format(cm.getWebsocketScheme(),
                                  cm.getInstaller(),
                                  cm.getWebsocketPort())

    ws_client = WebsocketClient(username=username, password=password,
                                url=url, verify=verify)

    try:
        asyncio.get_event_loop().run_until_complete(ws_client.start())
    except KeyboardInterrupt:
        sys.exit(0)
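The REST-to-websocket URL rewrite above is plain string surgery and can be exercised in isolation; a standalone sketch (the websocket port value is an assumption for illustration):

# Mirrors the rewrite above; 9443 is an assumed websocket port.
# count=1 guards against 'http' also appearing in the hostname.
def to_ws_url(url: str, ws_port: int = 9443) -> str:
    url = url.replace('http', 'ws', 1)   # http -> ws, https -> wss
    scheme, host, _ = url.split(':')     # discard the REST API port
    return '{}:{}:{}'.format(scheme, host, ws_port)

assert to_ws_url('https://installer.private:8443') == \
    'wss://installer.private:9443'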
def get_installer(args: argparse.Namespace) -> str:
    """
    Gets the hostname of the Tortuga installer.

    :param argparse.Namespace args: argparse arguments

    :return str: the installer hostname

    """
    cm = ConfigManager()

    url, username, password, verify = get_web_service_config(args)

    if url:
        url_parts = url.split(':')
        host = url_parts[1].replace('//', '')
    else:
        host = cm.getInstaller()

    return host
class TortugaWsApiClient:
    """
    Tortuga ws api client class.

    """
    def __init__(self, endpoint: str, token: Optional[str] = None,
                 username: Optional[str] = None,
                 password: Optional[str] = None,
                 base_url: Optional[str] = None,
                 verify: bool = True) -> None:
        self._cm = ConfigManager()

        if not base_url:
            base_url = '{}://{}:{}'.format(self._cm.getAdminScheme(),
                                           self._cm.getInstaller(),
                                           self._cm.getAdminPort())

        if not token:
            if username is None and password is None:
                logger.debug('Using built-in user credentials')
                username = self._cm.getCfmUser()
                password = self._cm.getCfmPassword()

        self._client = RestApiClient(token=token, username=username,
                                     password=password, baseurl=base_url,
                                     verify=verify)

        self._client.baseurl = '{}/{}/{}/'.format(base_url, WS_API_VERSION,
                                                  endpoint)

    def _build_query_string(self, params: dict) -> str:
        return '&'.join([f'{k}={v}' for k, v in params.items()])

    def list(self, **params) -> list:
        path = '/'

        query_string = self._build_query_string(params)
        if query_string:
            path += '?{}'.format(query_string)

        return self._client.get(path)

    def get(self, id_: str) -> dict:
        path = '/{}'.format(id_)

        return self._client.get(path)

    def post(self, data: dict) -> dict:
        path = '/'

        return self._client.post(path, data=data)

    def put(self, data: dict) -> dict:
        if not data or 'id' not in data.keys():
            raise Exception('Object does not have an id field')

        id_ = data['id']
        if not id_:
            raise Exception('Object id is invalid')

        path = '/{}'.format(id_)

        return self._client.put(path, data=data)

    def delete(self, id_: str):
        path = '/{}'.format(id_)

        return self._client.delete(path)
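A hypothetical round trip through the token-aware client; the 'events' endpoint, token placeholder, and payload fields are invented for illustration:

# Hypothetical CRUD usage -- none of these values come from the code above.
client = TortugaWsApiClient(endpoint='events', token='<bearer-token>')

created = client.post({'name': 'demo'})   # POST .../<WS_API_VERSION>/events/
created['name'] = 'demo-renamed'
client.put(created)                       # PUT requires a truthy 'id' key
client.delete(created['id'])              # DELETE .../events/<id>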
class ComponentInstaller(ComponentInstallerBase):
    """
    Tortuga DHCP component.

    """
    name = 'dhcpd'
    version = '7.0.3'
    os_list = [
        {'family': 'rhel', 'version': '6', 'arch': 'x86_64'},
        {'family': 'rhel', 'version': '7', 'arch': 'x86_64'},
    ]
    installer_only = True

    def __init__(self, kit):
        """
        Initialise parent class.

        """
        super().__init__(kit)

        self._provider = DhcpdDhcpProvider(self)
        self._manager = self._get_os_dhcpd_manager('dhcpd')
        self._config = ConfigManager()

    def _get_os_dhcpd_manager(self, name):
        """
        Get dhcpd manager for the appropriate os.

        :param name: the name of the dhcpd manager to get
        :returns: the dhcpd manager instance

        """
        dir_name = '{}/util'.format(self.kit_installer.kit_path)
        dhcpd_manager = \
            getOsObjectFactory().getOsKitApplicationManager(name, dir_name)

        return dhcpd_manager

    def _get_provisioning_networks(self):
        """
        Get provisioning networks.

        :returns: Generator provisioning networks

        """
        for network in NetworksDbHandler().getNetworkList(self.session):
            if network.type == 'provision':
                yield network

    def _get_provisioning_nics(self, node):
        """
        Get provisioning nics.

        :param node: Node object
        :returns: Generator nics

        """
        for nic in node.getNics():
            if nic.getNetwork().getType() == 'provision':
                yield nic

    def _get_provisioning_nics_ip(self, node):
        """
        Get provisioning nics IP addresses.

        :param node: Node object
        :returns: Generator IPv4Address

        """
        for nic in self._get_provisioning_nics(node):
            yield ipaddress.IPv4Address(nic.getIp())

    @staticmethod
    def _get_local_nics(nics):
        """
        Get valid NICs.

        :returns: Generator nics

        """
        for nic in nics:
            if nic.boot and nic.mac:
                yield nic

    def _get_installer_ip(self, network_id):
        """
        Return IP address of provisioning interface on installer

        :raises NicNotFound:

        """
        installer_node = NodeApi().getInstallerNode(self.session)

        prov_nics = self._get_provisioning_nics(installer_node)

        for prov_nic in prov_nics:
            if prov_nic.getNetwork().getId() == network_id:
                return ipaddress.IPv4Address(prov_nic.getIp())

        raise NicNotFound(
            'Network has no corresponding provisioning NIC on installer')

    def _dhcp_subnets(self):
        """
        DHCP subnet dictionary.

        :returns: Dictionary keyed on IPv4Network network address; values
                  describe the subnet

        """
        subnets = {}

        for network in self._get_provisioning_networks():
            subnet = {'nodes': []}

            installer_ip = self._get_installer_ip(network.id)
            subnet['installerIp'] = installer_ip

            if not network.gateway:
                logger.info(
                    '[dhcpd] Gateway not defined for network [{}/{}], using'
                    ' IP [{}]'.format(
                        network.address, network.netmask, installer_ip
                    )
                )
                subnet['gateway'] = installer_ip
            else:
                subnet['gateway'] = network.gateway

            for nic in self._get_local_nics(network.nics):
                node = nic.node

                if node.hardwareprofile.location != 'local' \
                        or node.state == 'Deleted' \
                        or node.name == self._config.getInstaller():
                    continue

                node = {
                    'ip': nic.ip,
                    'mac': nic.mac,
                    'fqdn': node.name,
                    'hostname': node.name.split('.', 1)[0],
                    'unmanaged': False
                }

                subnet['nodes'].append(node)

            subnet_address = ipaddress.IPv4Network('{}/{}'.format(
                network.address, network.netmask
            ))

            subnets[subnet_address] = subnet

        return subnets

    @property
    def _get_kit_settings_dictionary(self):
        """
        :returns: Dictionary

        """
        settings = {}

        config = configparser.ConfigParser()
        config.read(os.path.join(
            self._config.getKitConfigBase(),
            'tortuga.ini'
        ))

        if config.has_section('tortuga_kit_base'):
            if config.has_option('tortuga_kit_base', 'disable_services'):
                settings['disable_services'] = \
                    config.get('tortuga_kit_base', 'disable_services') \
                    .split(' ')

        return settings

    def _configure(self, softwareProfileName, fd, *args, **kwargs):
        """
        Shim for unused arguments.

        :param softwareProfileName:
        :param fd:
        :param *args:
        :param **kwargs:
        :returns: None

        """
        self.action_configure(softwareProfileName, *args, **kwargs)

    def action_configure(self, _, *args, **kwargs):
        """
        Configure.

        :param _: Unused
        :param *args: Unused
        :param **kwargs: Unused
        :returns: None

        """
        try:
            result = GlobalParameterDbApi().getParameter(
                self.session,
                'DHCPLeaseTime'
            )

            dhcp_lease_time = int(result.getValue())
        except ParameterNotFound:
            dhcp_lease_time = 2400

        try:
            result = GlobalParameterDbApi().getParameter(
                self.session,
                'DNSZone')

            dns_zone = result.getValue()
        except ParameterNotFound:
            dns_zone = ''

        installer_node = NodeApi().getInstallerNode(self.session)

        self._manager.configure(
            dhcp_lease_time,
            dns_zone,
            self._get_provisioning_nics_ip(installer_node),
            self._dhcp_subnets(),
            installerNode=installer_node,
            bUpdateSysconfig=kwargs.get('bUpdateSysconfig', True),
            kit_settings=self._get_kit_settings_dictionary
        )

    def action_post_install(self, *args, **kwargs):
        """
        Triggered post install.

        :param *args: List Objects
        :param **kwargs: Dictionary Objects
        :returns: None

        """
        self._provider.write()
        self.action_configure(None, args, kwargs, bUpdateSysconfig=True)

    def action_add_host(self, hardware_profile_name, software_profile_name,
                        nodes, *args, **kwargs):
        """
        Triggered at add host.

        :returns: None

        """
        self.action_configure(
            software_profile_name,
            None,
            args,
            kwargs,
            bUpdateSysconfig=False
        )

    def action_delete_host(self, hardware_profile_name,
                           software_profile_name, nodes, *args, **kwargs):
        """
        Triggered delete host.

        :returns: None

        """
        self.action_configure(
            software_profile_name,
            None,
            args,
            kwargs,
            bUpdateSysconfig=False
        )
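For reference, the mapping `_dhcp_subnets()` builds can be mocked directly; a sketch with invented addresses (keys are `IPv4Network` objects):

# Shape of the _dhcp_subnets() return value, with made-up addresses.
import ipaddress

subnets = {
    ipaddress.IPv4Network('10.0.0.0/24'): {
        'installerIp': ipaddress.IPv4Address('10.0.0.1'),
        # falls back to the installer IP when the network defines no gateway
        'gateway': ipaddress.IPv4Address('10.0.0.1'),
        'nodes': [
            {
                'ip': '10.0.0.10',
                'mac': '52:54:00:12:34:56',
                'fqdn': 'compute-01.private',
                'hostname': 'compute-01',
                'unmanaged': False,
            },
        ],
    },
}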
class NodeManager(TortugaObjectManager):  # pylint: disable=too-many-public-methods
    def __init__(self):
        super(NodeManager, self).__init__()

        self._nodeDbApi = NodeDbApi()
        self._cm = ConfigManager()
        self._bhm = osUtility.getOsObjectFactory().getOsBootHostManager(
            self._cm)
        self._syncApi = SyncApi()
        self._nodesDbHandler = NodesDbHandler()
        self._addHostManager = AddHostManager()
        self._logger = logging.getLogger(NODE_NAMESPACE)

    def __validateHostName(self, hostname: str, name_format: str) -> None:
        """
        Raises:
            ConfigurationError
        """
        bWildcardNameFormat = (name_format == '*')

        if hostname and not bWildcardNameFormat:
            # Host name specified, but hardware profile does not
            # allow setting the host name
            raise ConfigurationError(
                'Hardware profile does not allow setting host names'
                ' of imported nodes')
        elif not hostname and bWildcardNameFormat:
            # Host name not specified but hardware profile expects it
            raise ConfigurationError(
                'Hardware profile requires host names to be set')

    def createNewNode(self, session: Session, addNodeRequest: dict,
                      dbHardwareProfile: HardwareProfileModel,
                      dbSoftwareProfile: Optional[SoftwareProfileModel] = None,
                      validateIp: bool = True, bGenerateIp: bool = True,
                      dns_zone: Optional[str] = None) -> NodeModel:
        """
        Convert the addNodeRequest into a Nodes object

        Raises:
            NicNotFound
        """
        self._logger.debug(
            'createNewNode(): session=[%s], addNodeRequest=[%s],'
            ' dbHardwareProfile=[%s], dbSoftwareProfile=[%s],'
            ' validateIp=[%s], bGenerateIp=[%s]' % (
                id(session),
                addNodeRequest,
                dbHardwareProfile.name,
                dbSoftwareProfile.name if dbSoftwareProfile else '(none)',
                validateIp, bGenerateIp))

        hostname = addNodeRequest['name'] \
            if 'name' in addNodeRequest else None

        # Ensure no conflicting options (ie. specifying host name for
        # hardware profile in which host names are generated)
        self.__validateHostName(hostname, dbHardwareProfile.nameFormat)

        node = NodeModel(name=hostname)

        if 'rack' in addNodeRequest:
            node.rack = addNodeRequest['rack']

        node.addHostSession = addNodeRequest['addHostSession']

        # Complete initialization of new node record
        nic_defs = addNodeRequest['nics'] \
            if 'nics' in addNodeRequest else []

        AddHostServerLocal().initializeNode(session,
                                            node,
                                            dbHardwareProfile,
                                            dbSoftwareProfile,
                                            nic_defs,
                                            bValidateIp=validateIp,
                                            bGenerateIp=bGenerateIp,
                                            dns_zone=dns_zone)

        # Set hardware profile of new node
        node.hardwareprofile = dbHardwareProfile

        node.softwareprofile = dbSoftwareProfile

        # Return the new node
        return node

    def getNode(self, session: Session, name,
                optionDict: OptionDict = None) -> Node:
        """
        Get node by name

        Raises:
            NodeNotFound
        """
        return self.__populate_nodes(session, [
            self._nodeDbApi.getNode(
                session,
                name,
                optionDict=get_default_relations(optionDict))
        ])[0]

    def getNodeById(self, session: Session, nodeId: int,
                    optionDict: OptionDict = None) -> Node:
        """
        Get node by node id

        Raises:
            NodeNotFound
        """
        return self.__populate_nodes(session, [
            self._nodeDbApi.getNodeById(
                session,
                int(nodeId),
                optionDict=get_default_relations(optionDict))
        ])[0]

    def getNodeByIp(self, session: Session, ip: str,
                    optionDict: Dict[str, bool] = None) -> Node:
        """
        Get node by IP address

        Raises:
            NodeNotFound
        """
        return self.__populate_nodes(session, [
            self._nodeDbApi.getNodeByIp(
                session,
                ip,
                optionDict=get_default_relations(optionDict))
        ])[0]

    def getNodeList(self, session, tags=None,
                    optionDict: Optional[OptionDict] = None) -> List[Node]:
        """
        Return all nodes

        """
        return self.__populate_nodes(
            session,
            self._nodeDbApi.getNodeList(
                session,
                tags=tags,
                optionDict=get_default_relations(optionDict)))

    def __populate_nodes(self, session: Session,
                         nodes: List[Node]) -> List[Node]:
        """
        Expand non-database fields in Node objects

        """
        class SoftwareProfileMetadataCache(defaultdict):
            def __missing__(self, key):
                metadata = \
                    SoftwareProfileManager().get_software_profile_metadata(
                        session, key
                    )
                self[key] = metadata

                return metadata

        swprofile_map = SoftwareProfileMetadataCache()

        for node in nodes:
            if not node.getSoftwareProfile():
                continue

            node.getSoftwareProfile().setMetadata(
                swprofile_map[node.getSoftwareProfile().getName()])

        return nodes

    def updateNode(self, session: Session, nodeName: str,
                   updateNodeRequest: dict) -> None:
        """
        Calls updateNode() method of resource adapter

        """
        self._logger.debug('updateNode(): name=[{0}]'.format(nodeName))

        try:
            node = self._nodesDbHandler.getNode(session, nodeName)

            if 'nics' in updateNodeRequest:
                nic = updateNodeRequest['nics'][0]

                if 'ip' in nic:
                    node.nics[0].ip = nic['ip']
                    node.nics[0].boot = True

            # Call resource adapter
            # self._nodesDbHandler.updateNode(session, node, updateNodeRequest)
            adapter = self.__getResourceAdapter(node.hardwareprofile)
            adapter.updateNode(session, node, updateNodeRequest)

            run_post_install = False

            #
            # Capture previous state and node data as dict for firing the
            # event later on
            #
            previous_state = node.state
            node_dict = Node.getFromDbDict(node.__dict__).getCleanDict()

            if 'state' in updateNodeRequest:
                run_post_install = \
                    node.state == state.NODE_STATE_ALLOCATED and \
                    updateNodeRequest['state'] == \
                    state.NODE_STATE_PROVISIONED

                node.state = updateNodeRequest['state']
                node_dict['state'] = updateNodeRequest['state']

            session.commit()

            #
            # If the node state has changed, then fire the node state
            # changed event
            #
            if node_dict['state'] != previous_state:
                NodeStateChanged.fire(node=node_dict,
                                      previous_state=previous_state)

            if run_post_install:
                self._logger.debug(
                    'updateNode(): run-post-install for node [{0}]'.format(
                        node.name))

                self.__scheduleUpdate()
        except Exception:
            session.rollback()

            raise

    def updateNodeStatus(self, session: Session, nodeName: str,
                         node_state: Optional[str] = None,
                         bootFrom: int = None):
        """Update node status

        If both 'node_state' and 'bootFrom' are None, this operation will
        update only the 'lastUpdated' timestamp.

        Returns:
            bool indicating whether state and/or bootFrom differed from
            current value
        """
        value = 'None' if bootFrom is None else \
            '1 (disk)' if int(bootFrom) == 1 else '0 (network)'

        self._logger.debug(
            'updateNodeStatus(): node=[%s], node_state=[%s],'
            ' bootFrom=[%s]', nodeName, node_state, value)

        dbNode = self._nodesDbHandler.getNode(session, nodeName)

        #
        # Capture previous state and node data in dict form for the
        # event later on
        #
        previous_state = dbNode.state
        node_dict = Node.getFromDbDict(dbNode.__dict__).getCleanDict()

        # Bitfield representing node changes (bit 0 = state change,
        # bit 1 = bootFrom change)
        changed = 0

        if node_state is not None and node_state != dbNode.state:
            # 'state' changed
            changed |= 1

        if bootFrom is not None and bootFrom != dbNode.bootFrom:
            # 'bootFrom' changed
            changed |= 2

        if changed:
            # Create custom log message
            msg = 'Node [%s] state change:' % (dbNode.name)

            if changed & 1:
                msg += ' state: [%s] -> [%s]' % (dbNode.state, node_state)

                dbNode.state = node_state
                node_dict['state'] = node_state

            if changed & 2:
                msg += ' bootFrom: [%d] -> [%d]' % (
                    dbNode.bootFrom, bootFrom)

                dbNode.bootFrom = bootFrom

            self._logger.info(msg)
        else:
            self._logger.info(
                'Updated timestamp for node [%s]' % (dbNode.name))

        dbNode.lastUpdate = time.strftime('%Y-%m-%d %H:%M:%S',
                                          time.localtime(time.time()))

        result = bool(changed)

        # Only change local boot configuration if the hardware profile is
        # not marked as 'remote' and we're not acting on the installer
        # node.
        if dbNode.softwareprofile and \
                dbNode.softwareprofile.type != 'installer' and \
                dbNode.hardwareprofile.location != 'remote':
            # update local boot configuration for on-premise nodes
            self._bhm.writePXEFile(session, dbNode, localboot=bootFrom)

        session.commit()

        #
        # If the node state has changed, fire the node state changed
        # event
        #
        if node_state and (previous_state != node_state):
            NodeStateChanged.fire(node=node_dict,
                                  previous_state=previous_state)

        return result

    def __process_nodeErrorDict(self, nodeErrorDict):
        result = {}
        nodes_deleted = []

        for key, nodeList in nodeErrorDict.items():
            result[key] = [dbNode.name for dbNode in nodeList]

            if key == 'NodesDeleted':
                for node in nodeList:
                    node_deleted = {
                        'name': node.name,
                        'hardwareprofile': node.hardwareprofile.name,
                        'addHostSession': node.addHostSession,
                    }

                    if node.softwareprofile:
                        node_deleted['softwareprofile'] = \
                            node.softwareprofile.name

                    nodes_deleted.append(node_deleted)

        return result, nodes_deleted

    def deleteNode(self, session, nodespec: str, force: bool = False):
        """
        Delete node by nodespec

        Raises:
            NodeNotFound
        """
        kitmgr = KitActionsManager()
        kitmgr.session = session

        try:
            nodes = self._nodesDbHandler.expand_nodespec(
                session, nodespec, include_installer=False)

            if not nodes:
                raise NodeNotFound(
                    'No nodes matching nodespec [%s]' % (nodespec))

            self.__validate_delete_nodes_request(nodes, force)

            self.__preDeleteHost(kitmgr, nodes)

            nodeErrorDict = self.__delete_node(session, nodes)

            # REALLY!?!? Convert a list of Nodes objects into a list of
            # node names so we can report the list back to the end-user.
            # This needs to be FIXED!
            result, nodes_deleted = self.__process_nodeErrorDict(
                nodeErrorDict)

            session.commit()

            # ============================================================
            # Perform actions *after* node deletion(s) have been committed
            # to database.
            # ============================================================

            self.__postDeleteHost(kitmgr, nodes_deleted)

            addHostSessions = set(
                [tmpnode['addHostSession'] for tmpnode in nodes_deleted])

            if addHostSessions:
                self._addHostManager.delete_sessions(addHostSessions)

            for nodeName in result['NodesDeleted']:
                # Remove the Puppet cert
                self._bhm.deletePuppetNodeCert(nodeName)

                self._bhm.nodeCleanup(nodeName)

                self._logger.info('Node [%s] deleted' % (nodeName))

            # Schedule a cluster update
            self.__scheduleUpdate()

            return result
        except Exception:
            session.rollback()

            raise

    def __validate_delete_nodes_request(self, nodes: List[NodeModel],
                                        force: bool):
        """
        Raises:
            DeleteNodeFailed
        """
        swprofile_distribution: Dict[SoftwareProfileModel, int] = {}

        for node in nodes:
            if node.softwareprofile not in swprofile_distribution:
                swprofile_distribution[node.softwareprofile] = 0

            swprofile_distribution[node.softwareprofile] += 1

        errors: List[str] = []

        for software_profile, num_nodes_deleted in \
                swprofile_distribution.items():
            if software_profile.lockedState == 'HardLocked':
                errors.append(
                    f'Nodes cannot be deleted from hard locked software'
                    f' profile [{software_profile.name}]'
                )

                continue

            if software_profile.minNodes and \
                    len(software_profile.nodes) - num_nodes_deleted < \
                    software_profile.minNodes:
                if force and software_profile.lockedState == 'SoftLocked':
                    # allow deletion of nodes when force is set and profile
                    # is soft locked
                    continue

                # do not allow number of software profile nodes to drop
                # below configured minimum
                errors.append(
                    'Software profile [{}] requires minimum of {} nodes;'
                    ' denied request to delete {} node(s)'.format(
                        software_profile.name, software_profile.minNodes,
                        num_nodes_deleted))

                continue

            if software_profile.lockedState == 'SoftLocked' and not force:
                errors.append(
                    'Nodes cannot be deleted from soft locked software'
                    f' profile [{software_profile.name}]')

        if errors:
            raise OperationFailed('\n'.join(errors))

    def __delete_node(self, session: Session,
                      dbNodes: List[NodeModel]) \
            -> Dict[str, List[NodeModel]]:
        """
        Raises:
            DeleteNodeFailed
        """
        result: Dict[str, list] = {
            'NodesDeleted': [],
            'DeleteNodeFailed': [],
            'SoftwareProfileLocked': [],
            'SoftwareProfileHardLocked': [],
        }

        nodes: Dict[HardwareProfileModel, List[NodeModel]] = {}
        events_to_fire: List[dict] = []

        #
        # Mark node states as deleted in the database
        #
        for dbNode in dbNodes:
            #
            # Capture previous state and node data as a dict for firing
            # the event later on
            #
            event_data = {
                'previous_state': dbNode.state,
                'node': Node.getFromDbDict(dbNode.__dict__).getCleanDict()
            }

            dbNode.state = state.NODE_STATE_DELETED
            event_data['node']['state'] = 'Deleted'

            # queue the state change event for firing after the commit
            events_to_fire.append(event_data)

            if dbNode.hardwareprofile not in nodes:
                nodes[dbNode.hardwareprofile] = [dbNode]
            else:
                nodes[dbNode.hardwareprofile].append(dbNode)

        session.commit()

        #
        # Fire node state change events
        #
        for event in events_to_fire:
            NodeStateChanged.fire(node=event['node'],
                                  previous_state=event['previous_state'])

        #
        # Call resource adapter with batch(es) of node lists keyed on
        # hardware profile.
        #
        for hwprofile, hwprofile_nodes in nodes.items():
            # Get the ResourceAdapter
            adapter = self.__get_resource_adapter(session, hwprofile)

            # Call the resource adapter
            adapter.deleteNode(hwprofile_nodes)

            # Iterate over all nodes in hardware profile, completing the
            # delete operation.
            for dbNode in hwprofile_nodes:
                for tag in dbNode.tags:
                    if len(tag.nodes) == 1 and \
                            not tag.softwareprofiles and \
                            not tag.hardwareprofiles:
                        session.delete(tag)

                # Delete the Node
                self._logger.debug('Deleting node [%s]' % (dbNode.name))

                session.delete(dbNode)

                result['NodesDeleted'].append(dbNode)

        return result

    def __get_resource_adapter(self, session: Session,
                               hardwareProfile: HardwareProfileModel):
        """
        Raises:
            OperationFailed
        """
        if not hardwareProfile.resourceadapter:
            raise OperationFailed(
                'Hardware profile [%s] does not have an associated'
                ' resource adapter' % (hardwareProfile.name))

        adapter = resourceAdapterFactory.get_api(
            hardwareProfile.resourceadapter.name)

        adapter.session = session

        return adapter

    def __process_delete_node_result(self, nodeErrorDict):
        # REALLY!?!? Convert a list of Nodes objects into a list of
        # node names so we can report the list back to the end-user.
        # This needs to be FIXED!
        result = {}
        nodes_deleted = []

        for key, nodeList in nodeErrorDict.items():
            result[key] = [dbNode.name for dbNode in nodeList]

            if key == 'NodesDeleted':
                for node in nodeList:
                    node_deleted = {
                        'name': node.name,
                        'hardwareprofile': node.hardwareprofile.name,
                    }

                    if node.softwareprofile:
                        node_deleted['softwareprofile'] = \
                            node.softwareprofile.name

                    nodes_deleted.append(node_deleted)

        return result, nodes_deleted

    def __preDeleteHost(self, kitmgr: KitActionsManager, nodes):
        self._logger.debug(
            '__preDeleteHost(): nodes=[%s]' % (
                ' '.join([node.name for node in nodes])))

        for node in nodes:
            kitmgr.pre_delete_host(
                node.hardwareprofile.name,
                node.softwareprofile.name if node.softwareprofile else None,
                nodes=[node.name])

    def __postDeleteHost(self, kitmgr, nodes_deleted):
        # 'nodes_deleted' is a list of dicts of the following format:
        #
        # {
        #     'name': 'compute-01',
        #     'softwareprofile': 'Compute',
        #     'hardwareprofile': 'LocalIron',
        # }
        #
        # if the node does not have an associated software profile, the
        # dict does not contain the key 'softwareprofile'.

        self._logger.debug(
            '__postDeleteHost(): nodes_deleted=[%s]' % (nodes_deleted))

        if not nodes_deleted:
            self._logger.debug('No nodes deleted in this operation')

            return

        for node_dict in nodes_deleted:
            kitmgr.post_delete_host(
                node_dict['hardwareprofile'],
                node_dict['softwareprofile']
                if 'softwareprofile' in node_dict else None,
                nodes=[node_dict['name']])

    def __scheduleUpdate(self):
        self._syncApi.scheduleClusterUpdate()

    def getInstallerNode(self, session,
                         optionDict: Optional[OptionDict] = None):
        return self._nodeDbApi.getNode(
            session,
            self._cm.getInstaller(),
            optionDict=get_default_relations(optionDict))

    def getProvisioningInfo(self, session: Session, nodeName):
        return self._nodeDbApi.getProvisioningInfo(session, nodeName)

    def startupNode(self, session, nodespec: str,
                    remainingNodeList: List[NodeModel] = None,
                    bootMethod: str = 'n') -> None:
        """
        Raises:
            NodeNotFound
        """
        try:
            nodes = self._nodesDbHandler.expand_nodespec(session, nodespec)

            if not nodes:
                raise NodeNotFound(
                    'No matching nodes for nodespec [%s]' % (nodespec))

            # Break list of nodes into dict keyed on hardware profile
            nodes_dict = self.__processNodeList(nodes)

            for dbHardwareProfile, detailsDict in nodes_dict.items():
                # Get the ResourceAdapter
                adapter = self.__getResourceAdapter(dbHardwareProfile)

                # Call startup action extension
                adapter.startupNode(
                    detailsDict['nodes'],
                    remainingNodeList=remainingNodeList or [],
                    tmpBootMethod=bootMethod)

            session.commit()
        except TortugaException:
            session.rollback()

            raise
        except Exception as ex:
            session.rollback()

            self._logger.exception(str(ex))

            raise

    def shutdownNode(self, session, nodespec: str,
                     bSoftShutdown: bool = False) -> None:
        """
        Raises:
            NodeNotFound
        """
        try:
            nodes = self._nodesDbHandler.expand_nodespec(session, nodespec)

            if not nodes:
                raise NodeNotFound(
                    'No matching nodes for nodespec [%s]' % (nodespec))

            d = self.__processNodeList(nodes)

            for dbHardwareProfile, detailsDict in d.items():
                # Get the ResourceAdapter
                adapter = self.__getResourceAdapter(dbHardwareProfile)

                # Call shutdown action extension
                adapter.shutdownNode(detailsDict['nodes'], bSoftShutdown)

            session.commit()
        except TortugaException:
            session.rollback()

            raise
        except Exception as ex:
            session.rollback()

            self._logger.exception(str(ex))

            raise

    def rebootNode(self, session, nodespec: str, bSoftReset: bool = False,
                   bReinstall: bool = False) -> None:
        """
        Raises:
            NodeNotFound
        """
        nodes = self._nodesDbHandler.expand_nodespec(session, nodespec)

        if not nodes:
            raise NodeNotFound(
                'No nodes matching nodespec [%s]' % (nodespec))

        if bReinstall:
            for dbNode in nodes:
                self._bhm.setNodeForNetworkBoot(session, dbNode)

        for dbHardwareProfile, detailsDict in \
                self.__processNodeList(nodes).items():
            # iterate over hardware profile/nodes dict to reboot each
            # node
            adapter = self.__getResourceAdapter(dbHardwareProfile)

            # Call reboot action extension
            adapter.rebootNode(detailsDict['nodes'], bSoftReset)

        session.commit()

    def getNodesByNodeState(self, session, node_state: str,
                            optionDict: Optional[OptionDict] = None) \
            -> TortugaObjectList:
        """
        Get nodes by state

        """
        return self.__populate_nodes(
            session,
            self._nodeDbApi.getNodesByNodeState(
                session,
                node_state,
                optionDict=get_default_relations(optionDict)))

    def getNodesByNameFilter(self, session, nodespec: str,
                             optionDict: OptionDict = None,
                             include_installer: Optional[bool] = True) \
            -> TortugaObjectList:
        """
        Return TortugaObjectList of Node objects matching nodespec

        """
        return self.__populate_nodes(
            session,
            self._nodeDbApi.getNodesByNameFilter(
                session,
                nodespec,
                optionDict=get_default_relations(optionDict),
                include_installer=include_installer))

    def getNodesByAddHostSession(self, session, addHostSession: str,
                                 optionDict: OptionDict = None) \
            -> TortugaObjectList:
        """
        Return TortugaObjectList of Node objects matching add host session

        """
        return self.__populate_nodes(
            session,
            self._nodeDbApi.getNodesByAddHostSession(
                session,
                addHostSession,
                optionDict=get_default_relations(optionDict)))

    def __processNodeList(self, dbNodes: List[NodeModel]) \
            -> Dict[HardwareProfileModel, Dict[str, list]]:
        """
        Returns dict indexed by hardware profile, each with a list of
        nodes in the hardware profile

        """
        d: Dict[HardwareProfileModel, Dict[str, list]] = {}

        for dbNode in dbNodes:
            if dbNode.hardwareprofile not in d:
                d[dbNode.hardwareprofile] = {
                    'nodes': [],
                }

            d[dbNode.hardwareprofile]['nodes'].append(dbNode)

        return d

    def __getResourceAdapter(self, hardwareProfile: HardwareProfileModel):
        """
        Raises:
            OperationFailed
        """
        if not hardwareProfile.resourceadapter:
            raise OperationFailed(
                'Hardware profile [%s] does not have an associated'
                ' resource adapter' % (hardwareProfile.name))

        return resourceAdapterFactory.get_api(
            hardwareProfile.resourceadapter.name)
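The `changed` bitfield in `updateNodeStatus()` packs the two independent comparisons into one integer; a standalone illustration with invented values:

# Bit 0 tracks a state change, bit 1 a bootFrom change, as in
# updateNodeStatus(); all values below are invented.
STATE_CHANGED, BOOTFROM_CHANGED = 1, 2

node_state, db_state = 'Provisioned', 'Allocated'
boot_from, db_boot_from = 1, 0

changed = 0
if node_state is not None and node_state != db_state:
    changed |= STATE_CHANGED
if boot_from is not None and boot_from != db_boot_from:
    changed |= BOOTFROM_CHANGED

assert changed == STATE_CHANGED | BOOTFROM_CHANGED
assert bool(changed)   # the method returns bool(changed)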
class NodeManager(TortugaObjectManager):  # pylint: disable=too-many-public-methods
    def __init__(self):
        super(NodeManager, self).__init__()

        self._nodeDbApi = NodeDbApi()
        self._hardwareProfileDbApi = HardwareProfileDbApi()
        self._cm = ConfigManager()
        self._san = san.San()

    def __validateHostName(self, hostname: str, name_format: str) -> None:
        """
        Raises:
            ConfigurationError
        """
        bWildcardNameFormat = (name_format == '*')

        if hostname and not bWildcardNameFormat:
            # Host name specified, but hardware profile does not
            # allow setting the host name
            raise ConfigurationError(
                'Hardware profile does not allow setting host names'
                ' of imported nodes')
        elif not hostname and bWildcardNameFormat:
            # Host name not specified but hardware profile expects it
            raise ConfigurationError(
                'Hardware profile requires host names to be set')

    def createNewNode(self, session: Session, addNodeRequest: dict,
                      dbHardwareProfile: HardwareProfiles,
                      dbSoftwareProfile: Optional[SoftwareProfiles] = None,
                      validateIp: bool = True, bGenerateIp: bool = True,
                      dns_zone: Optional[str] = None) -> Nodes:
        """
        Convert the addNodeRequest into a Nodes object

        Raises:
            NicNotFound
        """
        self.getLogger().debug(
            'createNewNode(): session=[%s], addNodeRequest=[%s],'
            ' dbHardwareProfile=[%s], dbSoftwareProfile=[%s],'
            ' validateIp=[%s], bGenerateIp=[%s]' % (
                id(session),
                addNodeRequest,
                dbHardwareProfile.name,
                dbSoftwareProfile.name if dbSoftwareProfile else '(none)',
                validateIp, bGenerateIp))

        # This is where the Nodes() object is first created.
        node = Nodes()

        # Set the default node state
        node.state = 'Discovered'

        if 'rack' in addNodeRequest:
            node.rack = addNodeRequest['rack']

        node.addHostSession = addNodeRequest['addHostSession']

        hostname = addNodeRequest['name'] \
            if 'name' in addNodeRequest else None

        # Ensure no conflicting options (ie. specifying host name for
        # hardware profile in which host names are generated)
        self.__validateHostName(hostname, dbHardwareProfile.nameFormat)

        node.name = hostname

        # Complete initialization of new node record
        nic_defs = addNodeRequest['nics'] \
            if 'nics' in addNodeRequest else []

        AddHostServerLocal().initializeNode(session,
                                            node,
                                            dbHardwareProfile,
                                            dbSoftwareProfile,
                                            nic_defs,
                                            bValidateIp=validateIp,
                                            bGenerateIp=bGenerateIp,
                                            dns_zone=dns_zone)

        # Set hardware profile of new node
        node.hardwareProfileId = dbHardwareProfile.id

        # Set software profile of new node; if the software profile is
        # None, attempt to set the software profile to the idle software
        # profile of the associated hardware profile. This may also be
        # None, in which case the software profile is undefined.
        node.softwareprofile = dbSoftwareProfile \
            if dbSoftwareProfile else dbHardwareProfile.idlesoftwareprofile

        node.isIdle = dbSoftwareProfile.isIdle \
            if dbSoftwareProfile else True

        # Return the new node
        return node

    def getNode(self, name, optionDict=None):
        """Get node by name"""
        optionDict_ = optionDict.copy() if optionDict else {}
        optionDict_.update({'hardwareprofile': True})

        node = self._nodeDbApi.getNode(name, optionDict_)

        hwprofile = self._hardwareProfileDbApi.getHardwareProfile(
            node.getHardwareProfile().getName(), {'resourceadapter': True})

        adapter_name = hwprofile.getResourceAdapter().getName() \
            if hwprofile.getResourceAdapter() else 'default'

        # Query vcpus from resource adapter
        ResourceAdapterClass = \
            resourceAdapterFactory.getResourceAdapterClass(adapter_name)

        # Update Node object
        node.setVcpus(ResourceAdapterClass().get_node_vcpus(node.getName()))

        return node

    def getNodeById(self, nodeId, optionDict=None):
        """
        Get node by node id

        Raises:
            NodeNotFound
        """
        return self._nodeDbApi.getNodeById(int(nodeId), optionDict)

    def getNodeByIp(self, ip):
        """
        Get node by IP address

        Raises:
            NodeNotFound
        """
        return self._nodeDbApi.getNodeByIp(ip)

    def getNodeList(self, tags=None):
        """Return all nodes"""
        return self._nodeDbApi.getNodeList(tags=tags)

    def updateNode(self, nodeName, updateNodeRequest):
        self.getLogger().debug('updateNode(): name=[{0}]'.format(nodeName))

        session = DbManager().openSession()

        try:
            node = NodesDbHandler().getNode(session, nodeName)

            if 'nics' in updateNodeRequest:
                nic = updateNodeRequest['nics'][0]

                if 'ip' in nic:
                    node.nics[0].ip = nic['ip']
                    node.nics[0].boot = True

            # Call resource adapter
            NodesDbHandler().updateNode(session, node, updateNodeRequest)

            run_post_install = False

            if 'state' in updateNodeRequest:
                run_post_install = node.state == 'Allocated' and \
                    updateNodeRequest['state'] == 'Provisioned'

                node.state = updateNodeRequest['state']

            session.commit()

            if run_post_install:
                self.getLogger().debug(
                    'updateNode(): run-post-install for node [{0}]'.format(
                        node.name))

                self.__scheduleUpdate()
        except Exception:
            session.rollback()

            self.getLogger().exception(
                'Exception updating node [{0}]'.format(nodeName))
        finally:
            DbManager().closeSession()

    def updateNodeStatus(self, nodeName, state=None, bootFrom=None):
        """Update node status

        If both 'state' and 'bootFrom' are None, this operation will
        update only the 'lastUpdated' timestamp.

        Returns:
            bool indicating whether state and/or bootFrom differed from
            current value
        """
        value = 'None' if bootFrom is None else \
            '1 (disk)' if int(bootFrom) == 1 else '0 (network)'

        self.getLogger().debug(
            'updateNodeStatus(): node=[%s], state=[%s], bootFrom=[%s]' % (
                nodeName, state, value))

        session = DbManager().openSession()

        try:
            node = NodesDbHandler().getNode(session, nodeName)

            result = self._updateNodeStatus(node, state=state,
                                            bootFrom=bootFrom)

            session.commit()

            return result
        finally:
            DbManager().closeSession()

    def _updateNodeStatus(self, dbNode, state=None, bootFrom=None):
        """
        Internal method which takes a 'Nodes' object instead of a node
        name.

        """
        result = NodesDbHandler().updateNodeStatus(dbNode, state, bootFrom)

        # Only change local boot configuration if the hardware profile is
        # not marked as 'remote' and we're not acting on the installer
        # node.
        if dbNode.softwareprofile and \
                dbNode.softwareprofile.type != 'installer' and \
                dbNode.hardwareprofile.location not in \
                ('remote', 'remote-vpn'):
            osUtility.getOsObjectFactory().getOsBootHostManager().\
                writePXEFile(dbNode, localboot=bootFrom)

        return result

    def __process_nodeErrorDict(self, nodeErrorDict):
        result = {}
        nodes_deleted = []

        for key, nodeList in nodeErrorDict.items():
            result[key] = [dbNode.name for dbNode in nodeList]

            if key == 'NodesDeleted':
                for node in nodeList:
                    node_deleted = {
                        'name': node.name,
                        'hardwareprofile': node.hardwareprofile.name,
                        'addHostSession': node.addHostSession,
                    }

                    if node.softwareprofile:
                        node_deleted['softwareprofile'] = \
                            node.softwareprofile.name

                    nodes_deleted.append(node_deleted)

        return result, nodes_deleted

    def deleteNode(self, nodespec):
        """
        Delete node by nodespec

        Raises:
            NodeNotFound
        """
        installer_hostname = socket.getfqdn().split('.', 1)[0]

        session = DbManager().openSession()

        try:
            nodes = []

            for node in self.__expand_nodespec(session, nodespec):
                if node.name.split('.', 1)[0] == installer_hostname:
                    self.getLogger().info(
                        'Ignoring request to delete installer node'
                        ' ([{0}])'.format(node.name))

                    continue

                nodes.append(node)

            if not nodes:
                raise NodeNotFound(
                    'No nodes matching nodespec [%s]' % (nodespec))

            self.__preDeleteHost(nodes)

            nodeErrorDict = NodesDbHandler().deleteNode(session, nodes)

            # REALLY!?!? Convert a list of Nodes objects into a list of
            # node names so we can report the list back to the end-user.
            # This needs to be FIXED!
            result, nodes_deleted = self.__process_nodeErrorDict(
                nodeErrorDict)

            session.commit()

            # ============================================================
            # Perform actions *after* node deletion(s) have been committed
            # to database.
            # ============================================================

            self.__postDeleteHost(nodes_deleted)

            addHostSessions = set(
                [tmpnode['addHostSession'] for tmpnode in nodes_deleted])

            if addHostSessions:
                AddHostManager().delete_sessions(addHostSessions)

            bhm = osUtility.getOsObjectFactory().getOsBootHostManager()

            for nodeName in result['NodesDeleted']:
                # Remove the Puppet cert
                bhm.deletePuppetNodeCert(nodeName)

                bhm.nodeCleanup(nodeName)

                self.getLogger().info('Node [%s] deleted' % (nodeName))

            # Schedule a cluster update
            self.__scheduleUpdate()

            return result
        except TortugaException:
            session.rollback()

            raise
        except Exception:
            session.rollback()

            self.getLogger().exception(
                'Exception in NodeManager.deleteNode()')

            raise
        finally:
            DbManager().closeSession()

    def __process_delete_node_result(self, nodeErrorDict):
        # REALLY!?!? Convert a list of Nodes objects into a list of
        # node names so we can report the list back to the end-user.
        # This needs to be FIXED!
        result = {}
        nodes_deleted = []

        for key, nodeList in nodeErrorDict.items():
            result[key] = [dbNode.name for dbNode in nodeList]

            if key == 'NodesDeleted':
                for node in nodeList:
                    node_deleted = {
                        'name': node.name,
                        'hardwareprofile': node.hardwareprofile.name,
                    }

                    if node.softwareprofile:
                        node_deleted['softwareprofile'] = \
                            node.softwareprofile.name

                    nodes_deleted.append(node_deleted)

        return result, nodes_deleted

    def __preDeleteHost(self, nodes):
        self.getLogger().debug(
            '__preDeleteHost(): nodes=[%s]' % (
                ' '.join([node.name for node in nodes])))

        if not nodes:
            self.getLogger().debug('No nodes deleted in this operation')

            return

        kitmgr = KitActionsManager()

        for node in nodes:
            kitmgr.pre_delete_host(
                node.hardwareprofile.name,
                node.softwareprofile.name if node.softwareprofile else None,
                nodes=[node.name])

    def __postDeleteHost(self, nodes_deleted):
        # 'nodes_deleted' is a list of dicts of the following format:
        #
        # {
        #     'name': 'compute-01',
        #     'softwareprofile': 'Compute',
        #     'hardwareprofile': 'LocalIron',
        # }
        #
        # if the node does not have an associated software profile, the
        # dict does not contain the key 'softwareprofile'.
        self.getLogger().debug(
            '__postDeleteHost(): nodes_deleted=[%s]' % (nodes_deleted))

        if not nodes_deleted:
            self.getLogger().debug('No nodes deleted in this operation')

            return

        kitmgr = KitActionsManager()

        for node_dict in nodes_deleted:
            kitmgr.post_delete_host(
                node_dict['hardwareprofile'],
                node_dict['softwareprofile']
                if 'softwareprofile' in node_dict else None,
                nodes=[node_dict['name']])

    def __scheduleUpdate(self):
        tortugaSubprocess.executeCommand(
            os.path.join(self._cm.getRoot(), 'bin/schedule-update'))

    def getInstallerNode(self, optionDict=None):
        return self._nodeDbApi.getNode(self._cm.getInstaller(),
                                       optionDict=optionDict)

    def getProvisioningInfo(self, nodeName):
        return self._nodeDbApi.getProvisioningInfo(nodeName)

    def getKickstartFile(self, node, hardwareprofile, softwareprofile):
        """
        Generate kickstart file for specified node

        Raises:
            OsNotSupported
        """
        osFamilyName = softwareprofile.os.family.name

        try:
            osSupportModule = __import__(
                'tortuga.os.%s.osSupport' % (osFamilyName),
                fromlist=['OSSupport'])
        except ImportError:
            raise OsNotSupported(
                'Operating system family [%s] not supported' % (
                    osFamilyName))

        OSSupport = osSupportModule.OSSupport

        tmpOsFamilyInfo = OsFamilyInfo(softwareprofile.os.family.name,
                                       softwareprofile.os.family.version,
                                       softwareprofile.os.family.arch)

        return OSSupport(tmpOsFamilyInfo).getKickstartFileContents(
            node, hardwareprofile, softwareprofile)

    def __transferNodeCommon(self, session, dbDstSoftwareProfile,
                             results):  # pylint: disable=no-self-use
        # Aggregate list of transferred nodes based on hardware profile
        # to call resource adapter minimal number of times.
        hwProfileMap = {}

        for transferResultDict in results:
            dbNode = transferResultDict['node']
            dbHardwareProfile = dbNode.hardwareprofile

            if dbHardwareProfile not in hwProfileMap:
                hwProfileMap[dbHardwareProfile] = [transferResultDict]
            else:
                hwProfileMap[dbHardwareProfile].append(transferResultDict)

        session.commit()

        nodeTransferDict = {}

        # Kill two birds with one stone... do the resource adapter
        # action as well as populate the nodeTransferDict. This saves
        # having to iterate twice on the same result data.
        for dbHardwareProfile, nodesDict in hwProfileMap.items():
            adapter = resourceAdapterFactory.getApi(
                dbHardwareProfile.resourceadapter.name)

            dbNodeTuples = []

            for nodeDict in nodesDict:
                dbNode = nodeDict['node']
                dbSrcSoftwareProfile = nodeDict['prev_softwareprofile']

                if dbSrcSoftwareProfile.name not in nodeTransferDict:
                    nodeTransferDict[dbSrcSoftwareProfile.name] = {
                        'added': [],
                        'removed': [dbNode],
                    }
                else:
                    nodeTransferDict[dbSrcSoftwareProfile.name]['removed'].\
                        append(dbNode)

                if dbDstSoftwareProfile.name not in nodeTransferDict:
                    nodeTransferDict[dbDstSoftwareProfile.name] = {
                        'added': [dbNode],
                        'removed': [],
                    }
                else:
                    nodeTransferDict[dbDstSoftwareProfile.name]['added'].\
                        append(dbNode)

                # The destination software profile is available through
                # node relationship.
                dbNodeTuples.append((dbNode, dbSrcSoftwareProfile))

            adapter.transferNode(dbNodeTuples, dbDstSoftwareProfile)

            session.commit()

        # Now call the 'refresh' action to all participatory components
        KitActionsManager().refresh(nodeTransferDict)

        return results

    def transferNode(self, nodespec, dstSoftwareProfileName, bForce=False):
        """
        Transfer nodes defined by 'nodespec' to 'dstSoftwareProfile'

        Raises:
            NodeNotFound
            SoftwareProfileNotFound
            NodeTransferNotValid
        """
        session = DbManager().openSession()

        try:
            nodes = self.__expand_nodespec(session, nodespec)

            if not nodes:
                raise NodeNotFound(
                    'No nodes matching nodespec [%s]' % (nodespec))

            dbDstSoftwareProfile = SoftwareProfilesDbHandler().\
                getSoftwareProfile(session, dstSoftwareProfileName)

            results = NodesDbHandler().transferNode(
                session, nodes, dbDstSoftwareProfile, bForce=bForce)

            return self.__transferNodeCommon(session, dbDstSoftwareProfile,
                                             results)
        finally:
            DbManager().closeSession()

    def transferNodes(self, srcSoftwareProfileName, dstSoftwareProfileName,
                      count, bForce=False):
        """
        Transfer 'count' nodes from 'srcSoftwareProfile' to
        'dstSoftwareProfile'

        Raises:
            SoftwareProfileNotFound
        """
        session = DbManager().openSession()

        try:
            # It is not necessary to specify a source software profile.
            # If not specified, pick any eligible nodes in the hardware
            # profile mapped to the destination software profile. Don't
            # ask me who uses this capability, but it's here if you need
            # it...
            dbSrcSoftwareProfile = SoftwareProfilesDbHandler().\
                getSoftwareProfile(session, srcSoftwareProfileName) \
                if srcSoftwareProfileName else None

            dbDstSoftwareProfile = SoftwareProfilesDbHandler().\
                getSoftwareProfile(session, dstSoftwareProfileName)

            results = NodesDbHandler().transferNodes(
                session, dbSrcSoftwareProfile, dbDstSoftwareProfile,
                int(float(count)), bForce=bForce)

            return self.__transferNodeCommon(session, dbDstSoftwareProfile,
                                             results)
        finally:
            DbManager().closeSession()

    def idleNode(self, nodespec):
        """
        Raises:
            NodeNotFound
        """
        session = DbManager().openSession()

        try:
            nodes = self.__expand_nodespec(session, nodespec)

            if not nodes:
                raise NodeNotFound(
                    'No nodes matching nodespec [%s]' % (nodespec))

            result = NodesDbHandler().idleNode(session, nodes)

            # Convert list of Nodes to list of node names for providing
            # user feedback.
            result_dict = {}

            for key, dbNodes in result.items():
                result_dict[key] = [dbNode.name for dbNode in dbNodes]

            session.commit()

            # Remove Puppet certificate(s) for idled node(s)
            for node_name in result_dict['success']:
                # Remove Puppet certificate for idled node
                bhm = osUtility.getOsObjectFactory().getOsBootHostManager()
                bhm.deletePuppetNodeCert(node_name)

            # Schedule a cluster update
            self.__scheduleUpdate()

            return result_dict
        except TortugaException:
            session.rollback()

            raise
        except Exception as ex:
            session.rollback()

            self.getLogger().exception(
                '[%s] %s' % (self.__class__.__name__, ex))

            raise
        finally:
            DbManager().closeSession()

    def __process_activateNode_results(self, tmp_results, dstswprofilename):
        results = {}

        for key, values in tmp_results.items():
            # With the exception of the "ProfileMappingNotAllowed" dict
            # item, all items in the dict are lists of nodes.
            if key != 'ProfileMappingNotAllowed':
                results[key] = [dbNode.name for dbNode in values]
            else:
                results[key] = \
                    [(value[0].name, value[1], value[2])
                     for value in values]

        if tmp_results['success']:
            # Iterate over activated nodes, creating dict keyed on
            # 'addHostSession'
            addHostSessions = {}

            for node in tmp_results['success']:
                if node.addHostSession not in addHostSessions:
                    addHostSessions[node.addHostSession] = []

                addHostSessions[node.addHostSession] = \
                    node.hardwareprofile.name

            # For each 'addHostSession', call postAddHost()
            for addHostSession, hwprofile in addHostSessions.items():
                AddHostManager().postAddHost(hwprofile, dstswprofilename,
                                             addHostSession)

        return results

    def activateNode(self, nodespec, softwareProfileName):
        """
        Raises:
            SoftwareProfileNotFound
            NodeNotFound
            TortugaException
        """
        session = DbManager().openSession()

        try:
            dbSoftwareProfile = SoftwareProfilesDbHandler().\
                getSoftwareProfile(session, softwareProfileName) \
                if softwareProfileName else None

            dbNodes = self.__expand_nodespec(session, nodespec)

            if not dbNodes:
                raise NodeNotFound(
                    'No nodes matching nodespec [%s]' % (nodespec))

            tmp_results = NodesDbHandler().activateNode(
                session, dbNodes, dbSoftwareProfile)

            results = self.__process_activateNode_results(
                tmp_results, softwareProfileName)

            session.commit()

            # Schedule a cluster update
            self.__scheduleUpdate()

            return results
        except TortugaException:
            session.rollback()

            raise
        except Exception as ex:
            session.rollback()

            self.getLogger().exception('%s' % ex)

            raise
        finally:
            DbManager().closeSession()

    def startupNode(self, nodespec, remainingNodeList=None,
                    bootMethod='n'):
        """
        Raises:
            NodeNotFound
        """
        return self._nodeDbApi.startupNode(
            nodespec, remainingNodeList=remainingNodeList or [],
            bootMethod=bootMethod)

    def shutdownNode(self, nodespec, bSoftShutdown=False):
        """
        Raises:
            NodeNotFound
        """
        return self._nodeDbApi.shutdownNode(nodespec, bSoftShutdown)

    def build_node_filterspec(self, nodespec):
        filter_spec = []

        for nodespec_token in nodespec.split(','):
            # Convert shell-style wildcards into SQL wildcards
            if '*' in nodespec_token or '?' in nodespec_token:
                filter_spec.append(
                    nodespec_token.replace('*', '%').replace('?', '_'))

                continue

            if '.' not in nodespec_token:
                filter_spec.append(nodespec_token)
                filter_spec.append(nodespec_token + '.%')

                continue

            # Add nodespec "AS IS"
            filter_spec.append(nodespec_token)

        return filter_spec

    def __expand_nodespec(self, session,
                          nodespec):  # pylint: disable=no-self-use
        # Expand wildcards in nodespec. Each token in the nodespec can
        # be a wildcard that expands into one or more nodes.
        return NodesDbHandler().getNodesByNameFilter(
            session, self.build_node_filterspec(nodespec))

    def rebootNode(self, nodespec, bSoftReset=False, bReinstall=False):
        """
        Raises:
            NodeNotFound
        """
        session = DbManager().openSession()

        try:
            nodes = self.__expand_nodespec(session, nodespec)

            if not nodes:
                raise NodeNotFound(
                    'No nodes matching nodespec [%s]' % (nodespec))

            bhm = osUtility.getOsObjectFactory().getOsBootHostManager()

            if bReinstall:
                for dbNode in nodes:
                    bhm.setNodeForNetworkBoot(dbNode)

            results = NodesDbHandler().rebootNode(session, nodes,
                                                  bSoftReset)

            session.commit()

            return results
        finally:
            DbManager().closeSession()

    def checkpointNode(self, nodeName):
        return self._nodeDbApi.checkpointNode(nodeName)

    def revertNodeToCheckpoint(self, nodeName):
        return self._nodeDbApi.revertNodeToCheckpoint(nodeName)

    def migrateNode(self, nodeName, remainingNodeList, liveMigrate):
        return self._nodeDbApi.migrateNode(nodeName, remainingNodeList,
                                           liveMigrate)

    def evacuateChildren(self, nodeName):
        self._nodeDbApi.evacuateChildren(nodeName)

    def getChildrenList(self, nodeName):
        return self._nodeDbApi.getChildrenList(nodeName)

    def setParentNode(self, nodeName, parentNodeName):
        self._nodeDbApi.setParentNode(nodeName, parentNodeName)

    def addStorageVolume(self, nodeName, volume, isDirect="DEFAULT"):
        """
        Raises:
            VolumeDoesNotExist
            UnsupportedOperation
        """
        node = self.getNode(nodeName, {'hardwareprofile': True})

        # Only allow persistent volumes to be attached...
        vol = self._san.getVolume(volume)

        if vol is None:
            raise VolumeDoesNotExist(
                'Volume [%s] does not exist' % (volume))

        if not vol.getPersistent():
            raise UnsupportedOperation(
                'Only persistent volumes can be attached')

        api = resourceAdapterFactory.getApi(
            node.getHardwareProfile().getResourceAdapter().getName())

        if isDirect == "DEFAULT":
            return api.addVolumeToNode(node, volume)

        return api.addVolumeToNode(node, volume, isDirect)

    def removeStorageVolume(self, nodeName, volume):
        """
        Raises:
            VolumeDoesNotExist
            UnsupportedOperation
        """
        node = self.getNode(nodeName, {'hardwareprofile': True})

        api = resourceAdapterFactory.getApi(
            node.getHardwareProfile().getResourceAdapter().getName())

        vol = self._san.getVolume(volume)

        if vol is None:
            raise VolumeDoesNotExist(
                'The volume [%s] does not exist' % (volume))

        if not vol.getPersistent():
            raise UnsupportedOperation(
                'Only persistent volumes can be detached')

        return api.removeVolumeFromNode(node, volume)

    def getStorageVolumes(self, nodeName):
        return self._san.getNodeVolumes(self.getNode(nodeName).getName())

    def getNodesByNodeState(self, state):
        return self._nodeDbApi.getNodesByNodeState(state)

    def getNodesByNameFilter(self, _filter):
        return self._nodeDbApi.getNodesByNameFilter(_filter)
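`build_node_filterspec()` is pure string manipulation, so its wildcard expansion is easy to verify standalone:

# Standalone copy of the nodespec expansion above; unqualified names match
# both the short form and any FQDN under it.
def build_node_filterspec(nodespec):
    filter_spec = []
    for token in nodespec.split(','):
        if '*' in token or '?' in token:
            # shell-style wildcards -> SQL wildcards
            filter_spec.append(token.replace('*', '%').replace('?', '_'))
        elif '.' not in token:
            filter_spec.extend([token, token + '.%'])
        else:
            filter_spec.append(token)
    return filter_spec

assert build_node_filterspec('compute-*') == ['compute-%']
assert build_node_filterspec('compute-01') == ['compute-01', 'compute-01.%']
assert build_node_filterspec('c-01.private') == ['c-01.private']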
class TortugaWsApi:
    """
    Base tortuga ws api class.

    """

    def __init__(self, username=None, password=None):
        self._logger = logging.getLogger('tortuga.wsapi.{0}'.format(
            self.__class__.__name__))
        self._logger.addHandler(logging.NullHandler())

        self._cm = ConfigManager()

        if username is None and password is None:
            self._logger.debug('[%s] Using built-in user credentials' % (
                self.__module__))

            username = self._cm.getCfmUser()
            password = self._cm.getCfmPassword()

        self._username = username
        self._password = password
        self._sm = None

    def _getWsUrl(self, url):
        """Extract scheme and net location from provided url. Use
        defaults if none exist."""
        result = urlparse(url)

        scheme = result.scheme if result.scheme else \
            self._cm.getAdminScheme()

        netloc = result.netloc if result.netloc else \
            '{0}:{1}'.format(self._cm.getInstaller(),
                             self._cm.getAdminPort())

        return '{0}://{1}'.format(scheme, netloc)

    def _getSessionManager(self):
        if not self._sm:
            self._sm = sessionManager.createSession()

        return self._sm

    def getLogger(self):
        """ Get logger for this class. """
        return self._logger

    def getConfigManager(self):
        """ Return configmanager reference """
        return self._cm

    def sendSessionRequest(self, url, method='GET',
                           contentType='application/json', data='',
                           acceptType='application/json'):
        """
        Send authorized session request

        Raises:
            UserNotAuthorized
        """
        sm = self._getSessionManager()

        if not sm.hasSession():
            if self._username is None:
                raise UserNotAuthorized('Username not supplied')

            if self._password is None:
                raise UserNotAuthorized('Password not supplied')

            wsUrl = self._getWsUrl(url)

            # establishSession() sets the 'wsUrl' so the explicit call
            # to setHost() is not required
            sm.establishSession(wsUrl, self._username, self._password)

        return sm.sendRequest(url, method, contentType, data,
                              acceptType=acceptType)

    def sendRequest(self, url, method='GET',
                    contentType='application/json', data='',
                    acceptType='application/json'):
        """
        Send unauthorized request.

        """
        sm = self._getSessionManager()

        # Because there's no call to establishSession(), explicitly call
        # setHost()
        sm.setHost(self._getWsUrl(url))

        return self._getSessionManager().sendRequest(
            url, method, contentType, data, acceptType)
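A hedged sketch of calling the legacy session-based client; the URL path and credentials are placeholders, not documented endpoints:

# Placeholder values throughout; subclasses normally supply the URL path.
api = TortugaWsApi(username='admin', password='secret')

# Establishes a session on first use (raises UserNotAuthorized if
# credentials are missing), then reuses it for subsequent calls.
response = api.sendSessionRequest('/v1/nodes')

# Unauthenticated variant against the same endpoint.
response = api.sendRequest('/v1/nodes')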
def get_puppet_node_yaml(session, nodeName): _cm = ConfigManager() publicInstallerFQDN = _cm.getInstaller().lower() primaryInstallerHostName = publicInstallerFQDN.split('.', 1)[0] try: dnsZone = GlobalParametersDbHandler().getParameter( session, 'DNSZone').value.lower() except ParameterNotFound: dnsZone = None try: depot_path = GlobalParametersDbHandler().getParameter( session, 'depot').value.lower() _cm.setDepotDir(depot_path) except ParameterNotFound: pass bInstaller = primaryInstallerHostName == nodeName.split('.', 1)[0] try: dbNode = NodesDbHandler().getNode(session, nodeName) except NodeNotFound: sys.exit(1) data = None try: from tortuga.db.dataRequestsDbHandler import DataRequestsDbHandler dbDataRequest = DataRequestsDbHandler().get_by_addHostSession( session, dbNode.addHostSession) if dbDataRequest: data = dbDataRequest.request except Exception as e: pass if dbNode.hardwareprofile.nics: privateInstallerFQDN = '%s%s%s' % (primaryInstallerHostName, get_installer_hostname_suffix( dbNode.hardwareprofile.nics[0], enable_interface_aliases=None), '.%s' % (dnsZone) if dnsZone else '') else: privateInstallerFQDN = '%s%s' % (primaryInstallerHostName, '.%s' % (dnsZone) if dnsZone else '') if not bInstaller and dbNode.hardwareprofile.location == 'local': # If the hardware profile does not have an associated provisioning # NIC, use the public installer FQDN by default. This can happen if # the user has added their own "public" nodes to a local hardware # profile. if not dbNode.hardwareprofile.nics: installerHostName = publicInstallerFQDN else: installerHostName = privateInstallerFQDN else: # If the specified node is the installer itself or a node # accessing the installer through it's public interface, use the # public host name. installerHostName = publicInstallerFQDN puppet_classes = {} enabledKits = set() if dbNode.softwareprofile: for dbComponent in dbNode.softwareprofile.components: if not dbComponent.kit.isOs: # # Load the kit and component installers # kit_spec = (dbComponent.kit.name, dbComponent.kit.version, dbComponent.kit.iteration) kit_installer = get_kit_installer(kit_spec)() kit_installer.session = session _component = kit_installer.get_component_installer( dbComponent.name) # # Get the puppet args for the component # try: puppet_class_args = _component.run_action( 'get_puppet_args', dbNode.softwareprofile, dbNode.hardwareprofile, data=data) if puppet_class_args is not None: puppet_classes[_component.puppet_class] = \ puppet_class_args except Exception: # noqa pylint: disable=broad-except # suppress exception if unable to get Puppet args puppet_classes[_component.puppet_class] = {} else: # # OS kit component is omitted on installer. The installer # is assumed to have a pre-existing OS repository # configuration. # if bInstaller: continue enabledKits.add(dbComponent.kit) dataDict = {} if puppet_classes: dataDict['classes'] = puppet_classes parametersDict = {} dataDict['parameters'] = parametersDict # software profile if dbNode.softwareprofile: parametersDict['swprofilename'] = dbNode.softwareprofile.name # hardware profile parametersDict['hwprofilename'] = dbNode.hardwareprofile.name # installer hostname parametersDict['primary_installer_hostname'] = installerHostName # Local repos directory repodir = os.path.join(_cm.getDepotDir(), 'kits') # Build YUM repository entries only if we have kits associated with # the software profile. 
    if enabledKits:
        repourl = _cm.getIntWebRootUrl(installerHostName) + '/repos' \
            if not bInstaller else 'file://{0}'.format(repodir)

        repo_type = None

        if dbNode.softwareprofile.os.family.name == 'rhel':
            repo_type = 'yum'
        # elif dbNode.softwareprofile.os.family == 'ubuntu':
        #     repo_type = 'apt'

        if repo_type:
            # Only add 'repos' entries for supported operating system
            # families.
            repos_dict = {}

            for kit in enabledKits:
                if kit.isOs:
                    verstr = str(kit.version)
                    arch = kit.components[0].os[0].arch
                else:
                    verstr = '%s-%s' % (kit.version, kit.iteration)
                    arch = 'noarch'

                for dbKitSource in dbNode.softwareprofile.kitsources:
                    if dbKitSource in kit.sources:
                        baseurl = dbKitSource.url
                        break
                else:
                    subpath = '%s/%s/%s' % (kit.name, verstr, arch)

                    if not kit.isOs and not os.path.exists(
                            os.path.join(repodir, subpath,
                                         'repodata/repomd.xml')):
                        continue

                    baseurl = '%s/%s' % (repourl, subpath)

                    # [TODO] temporary workaround for handling RHEL media
                    # path.
                    #
                    # This code is duplicated from tortuga.boot.distro
                    if kit.isOs and \
                            dbNode.softwareprofile.os.name == 'rhel' and \
                            dbNode.softwareprofile.os.family.version != '7':
                        subpath += '/Server'

                if repo_type == 'yum':
                    if dbNode.hardwareprofile.location == 'remote':
                        cost = 1200
                    else:
                        cost = 1000

                    repos_dict['uc-kit-%s' % (kit.name)] = {
                        'type': repo_type,
                        'baseurl': baseurl,
                        'cost': cost,
                    }

            if repos_dict:
                parametersDict['repos'] = repos_dict

    # Enable '3rdparty' repo
    if dbNode.softwareprofile:
        third_party_repo_subpath = '3rdparty/%s/%s/%s' % (
            dbNode.softwareprofile.os.family.name,
            dbNode.softwareprofile.os.family.version,
            dbNode.softwareprofile.os.arch)

        local_repos_path = os.path.join(repodir, third_party_repo_subpath)

        # Check for repository metadata to validate that the repo exists
        if enabledKits and os.path.exists(
                os.path.join(local_repos_path, 'repodata', 'repomd.xml')):
            third_party_repo_dict = {
                'tortuga-third-party': {
                    'type': 'yum',
                    'baseurl': os.path.join(repourl,
                                            third_party_repo_subpath),
                },
            }

            if 'repos' not in parametersDict:
                parametersDict['repos'] = third_party_repo_dict
            else:
                parametersDict['repos'] = dict(
                    list(parametersDict['repos'].items()) +
                    list(third_party_repo_dict.items()))

    # environment
    dataDict['environment'] = 'production'

    sys.stdout.write(
        yaml.safe_dump(dataDict, default_flow_style=False,
                       explicit_start=True))
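For orientation, this function is effectively a Puppet external node classifier (ENC): it prints a YAML document describing the classes and parameters for one node. A hypothetical run against a compute node might emit something of this shape; every name, URL, and value below is invented for illustration:

---
classes:
  tortuga_kit_example::node: {}
parameters:
  swprofilename: compute
  hwprofilename: onprem
  primary_installer_hostname: installer.private.example.com
  repos:
    uc-kit-example:
      type: yum
      baseurl: http://installer.private.example.com:8008/repos/example/1.0.0-0/noarch
      cost: 1000
environment: production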
class NodeManager(TortugaObjectManager):
    # pylint: disable=too-many-public-methods

    def __init__(self):
        super(NodeManager, self).__init__()

        self._nodeDbApi = NodeDbApi()
        self._cm = ConfigManager()
        self._bhm = osUtility.getOsObjectFactory().getOsBootHostManager(
            self._cm)
        self._syncApi = SyncApi()
        self._nodesDbHandler = NodesDbHandler()
        self._addHostManager = AddHostManager()
        self._logger = logging.getLogger(NODE_NAMESPACE)

    def __validateHostName(self, hostname: str, name_format: str) -> None:
        """
        Raises:
            ConfigurationError
        """
        bWildcardNameFormat = (name_format == '*')

        if hostname and not bWildcardNameFormat:
            # Host name specified, but the hardware profile does not
            # allow setting the host name
            raise ConfigurationError(
                'Hardware profile does not allow setting host names'
                ' of imported nodes')

        if not hostname and bWildcardNameFormat:
            # Host name not specified, but the hardware profile expects it
            raise ConfigurationError(
                'Hardware profile requires host names to be set')

    def createNewNode(self, session: Session, addNodeRequest: dict,
                      dbHardwareProfile: HardwareProfileModel,
                      dbSoftwareProfile: Optional[SoftwareProfileModel] = None,
                      validateIp: bool = True, bGenerateIp: bool = True,
                      dns_zone: Optional[str] = None) -> NodeModel:
        """
        Convert the addNodeRequest into a node (NodeModel) record

        Raises:
            NicNotFound
        """
        self._logger.debug(
            'createNewNode(): session=[%s], addNodeRequest=[%s],'
            ' dbHardwareProfile=[%s], dbSoftwareProfile=[%s],'
            ' validateIp=[%s], bGenerateIp=[%s]',
            id(session), addNodeRequest, dbHardwareProfile.name,
            dbSoftwareProfile.name if dbSoftwareProfile else '(none)',
            validateIp, bGenerateIp)

        hostname = addNodeRequest.get('name')

        # Ensure no conflicting options (i.e. specifying a host name for
        # a hardware profile in which host names are generated)
        self.__validateHostName(hostname, dbHardwareProfile.nameFormat)

        node: NodeModel = NodeModel(name=hostname)

        if 'rack' in addNodeRequest:
            node.rack = addNodeRequest['rack']

        node.addHostSession = addNodeRequest['addHostSession']

        # Complete initialization of the new node record
        nic_defs = addNodeRequest.get('nics', [])

        AddHostServerLocal().initializeNode(
            session, node, dbHardwareProfile, dbSoftwareProfile, nic_defs,
            bValidateIp=validateIp, bGenerateIp=bGenerateIp,
            dns_zone=dns_zone)

        node.hardwareprofile = dbHardwareProfile
        node.softwareprofile = dbSoftwareProfile

        #
        # Fire the tags changed event for all creates that have tags...
        # we have to convert this to a Node object because... our API
        # is inconsistent!
        #
        n = Node.getFromDbDict(node.__dict__)
        if n.getTags():
            NodeTagsChanged.fire(node=n.getCleanDict(), previous_tags={})

        # Return the new node
        return node

    def getNode(self, session: Session, name,
                optionDict: OptionDict = None) -> Node:
        """
        Get node by name

        Raises:
            NodeNotFound
        """
        return self.__populate_nodes(session, [
            self._nodeDbApi.getNode(
                session, name,
                optionDict=get_default_relations(optionDict))
        ])[0]

    def getNodeById(self, session: Session, nodeId: int,
                    optionDict: OptionDict = None) -> Node:
        """
        Get node by node id

        Raises:
            NodeNotFound
        """
        return self.__populate_nodes(session, [
            self._nodeDbApi.getNodeById(
                session, int(nodeId),
                optionDict=get_default_relations(optionDict))
        ])[0]

    def getNodeByIp(self, session: Session, ip: str,
                    optionDict: Dict[str, bool] = None) -> Node:
        """
        Get node by IP address

        Raises:
            NodeNotFound
        """
        return self.__populate_nodes(session, [
            self._nodeDbApi.getNodeByIp(
                session, ip,
                optionDict=get_default_relations(optionDict))
        ])[0]

    def getNodeList(self, session, tags=None,
                    optionDict: Optional[OptionDict] = None) \
            -> List[Node]:
        """
        Return all nodes
        """
        return self.__populate_nodes(
            session,
            self._nodeDbApi.getNodeList(
                session, tags=tags,
                optionDict=get_default_relations(optionDict)))

    def __populate_nodes(self, session: Session, nodes: List[Node]) \
            -> List[Node]:
        """
        Expand non-database fields in Node objects
        """
        class SoftwareProfileMetadataCache(defaultdict):
            def __missing__(self, key):
                metadata = \
                    SoftwareProfileManager().get_software_profile_metadata(
                        session, key)
                self[key] = metadata

                return metadata

        swprofile_map = SoftwareProfileMetadataCache()

        for node in nodes:
            if not node.getSoftwareProfile():
                continue

            node.getSoftwareProfile().setMetadata(
                swprofile_map[node.getSoftwareProfile().getName()])

        return nodes

    def updateNode(self, session: Session, nodeName: str,
                   updateNodeRequest: dict) -> None:
        """
        Calls the updateNode() method of the resource adapter
        """
        self._logger.debug('updateNode(): name=[{0}]'.format(nodeName))

        try:
            # Get the old version for comparison later
            node_old: Node = self.getNode(session, nodeName)

            db_node = self._nodesDbHandler.getNode(session, nodeName)

            if 'nics' in updateNodeRequest:
                nic = updateNodeRequest['nics'][0]

                if 'ip' in nic:
                    db_node.nics[0].ip = nic['ip']
                    db_node.nics[0].boot = True

            adapter = self.__getResourceAdapter(session,
                                                db_node.hardwareprofile)
            adapter.updateNode(session, db_node, updateNodeRequest)

            run_post_install = False

            if 'state' in updateNodeRequest:
                run_post_install = \
                    db_node.state == state.NODE_STATE_ALLOCATED and \
                    updateNodeRequest['state'] == \
                    state.NODE_STATE_PROVISIONED

                db_node.state = updateNodeRequest['state']

            session.commit()

            #
            # Fire events as required; get the current/new state of the
            # node from the DB
            #
            node: Node = self.getNode(session, nodeName)

            if node.getState() != node_old.getState():
                NodeStateChanged.fire(node=node.getCleanDict(),
                                      previous_state=node_old.getState())

            if node.getTags() != node_old.getTags():
                NodeTagsChanged.fire(node=node.getCleanDict(),
                                     previous_tags=node_old.getTags())

            if run_post_install:
                self._logger.debug(
                    'updateNode(): run-post-install for node'
                    ' [{0}]'.format(db_node.name))

                self.__scheduleUpdate()
        except Exception:
            session.rollback()
            raise

    def updateNodeStatus(self, session: Session, nodeName: str,
                         node_state: Optional[str] = None,
                         bootFrom: int = None):
        """Update node status.

        If both 'node_state' and 'bootFrom' are None, this operation
        updates only the 'lastUpdated' timestamp.
        Returns:
            bool indicating whether state and/or bootFrom differed from
            the current value
        """
        value = 'None' if bootFrom is None else \
            '1 (disk)' if int(bootFrom) == 1 else '0 (network)'

        self._logger.debug(
            'updateNodeStatus(): node=[%s], node_state=[%s],'
            ' bootFrom=[%s]', nodeName, node_state, value)

        dbNode = self._nodesDbHandler.getNode(session, nodeName)

        #
        # Capture previous state and node data in dict form for the
        # event later on
        #
        previous_state = dbNode.state
        node_dict = Node.getFromDbDict(dbNode.__dict__).getCleanDict()

        # Bitfield representing node changes (bit 0 = state change,
        # bit 1 = bootFrom change)
        changed = 0

        if node_state is not None and node_state != dbNode.state:
            # 'state' changed
            changed |= 1

        if bootFrom is not None and bootFrom != dbNode.bootFrom:
            # 'bootFrom' changed
            changed |= 2

        if changed:
            # Create custom log message
            msg = 'Node [%s] state change:' % (dbNode.name)

            if changed & 1:
                msg += ' state: [%s] -> [%s]' % (dbNode.state, node_state)

                dbNode.state = node_state
                node_dict['state'] = node_state

            if changed & 2:
                msg += ' bootFrom: [%d] -> [%d]' % (dbNode.bootFrom,
                                                    bootFrom)

                dbNode.bootFrom = bootFrom

            self._logger.info(msg)
        else:
            self._logger.info('Updated timestamp for node [%s]' % (
                dbNode.name))

        dbNode.lastUpdate = time.strftime('%Y-%m-%d %H:%M:%S',
                                          time.gmtime(time.time()))

        result = bool(changed)

        # Only change the local boot configuration if the hardware
        # profile is not marked as 'remote' and we're not acting on the
        # installer node.
        if dbNode.softwareprofile and \
                dbNode.softwareprofile.type != 'installer' and \
                dbNode.hardwareprofile.location != 'remote':
            # update local boot configuration for on-premise nodes
            self._bhm.writePXEFile(session, dbNode, localboot=bootFrom)

        session.commit()

        #
        # If the node state has changed, fire the node state changed
        # event
        #
        if node_state and (previous_state != node_state):
            NodeStateChanged.fire(node=node_dict,
                                  previous_state=previous_state)

        return result

    def deleteNode(self, session: Session, nodespec: str,
                   force: bool = False):
        """
        Delete nodes by node spec

        :param Session session: a database session
        :param str nodespec: a node spec
        :param bool force: whether or not this is a force operation
        """
        try:
            nodes = self.__get_nodes_for_deletion(session, nodespec)

            kitmgr = KitActionsManager()
            kitmgr.session = session

            self.__delete_nodes(session, kitmgr, nodes)
        except Exception:
            session.rollback()
            raise

    def __get_nodes_for_deletion(self, session: Session,
                                 nodespec: str) -> List[NodeModel]:
        """
        Gets a list of nodes from a node spec.
        :param Session session: a database session
        :param str nodespec: a node spec
        :raise NodeNotFound:
        """
        nodes = self._nodesDbHandler.expand_nodespec(
            session, nodespec, include_installer=False)

        if not nodes:
            raise NodeNotFound(
                'No nodes matching nodespec [{}]'.format(nodespec))

        return nodes

    def __delete_nodes(self, session: Session, kitmgr: KitActionsManager,
                       nodes: List[NodeModel]) -> None:
        """
        :raises DeleteNodeFailed:
        """
        hwprofile_nodes = self.__pre_delete_nodes(kitmgr, nodes)

        # commit node state changes to the database
        session.commit()

        for hwprofile, node_data_dicts in hwprofile_nodes.items():
            # build a list of NodeModels
            node_objs: List[NodeModel] = [
                node_data_dict['node']
                for node_data_dict in node_data_dicts
            ]

            # Call the resource adapter deleteNode() entry point
            self.__get_resource_adapter(
                session, hwprofile).deleteNode(node_objs)

            # Perform the delete node action for each node in this
            # hardware profile
            for node_data_dict in node_data_dicts:
                # get a JSON-serializable dict for the node record
                node_dict = NodeSchema(
                    only=('hardwareprofile', 'softwareprofile', 'name',
                          'state'),
                    exclude=('softwareprofile.metadata',)).dump(
                        node_data_dict['node']).data

                self._logger.debug('Deleting node [%s]',
                                   node_dict['name'])

                #
                # Fire node state change events
                #
                NodeStateChanged.fire(
                    node=node_dict,
                    previous_state=node_data_dict['previous_state'])

                session.delete(node_data_dict['node'])

                # Commit the actual deletion of this node to the DB. This
                # is required as the post_delete hooks may use a different
                # DB session and we have already committed some changes
                # for this node.
                session.commit()

                self.__post_delete(kitmgr, node_dict)

                self._logger.info('Node [%s] deleted', node_dict['name'])

            # clean up the add host session cache
            self._addHostManager.delete_sessions(
                set([
                    node.addHostSession
                    for node in node_objs
                    if node.addHostSession
                ]))

        self.__scheduleUpdate()

    def __pre_delete_nodes(self, kitmgr: KitActionsManager,
                           nodes: List[NodeModel]) \
            -> DefaultDict[HardwareProfileModel, List[NodeModel]]:
        """Collect the nodes being deleted, call the pre-delete kit
        action, mark the nodes for deletion, and return a dict of nodes
        keyed by hardware profile.
        """
        hwprofile_nodes = defaultdict(list)

        #
        # Mark node states as deleted in the database
        #
        for node in nodes:
            # call the pre-delete host kit action
            kitmgr.pre_delete_host(
                node.hardwareprofile.name,
                get_node_swprofile_name(node),
                nodes=[node.name])

            #
            # Capture the previous state and node data as a dict for
            # firing the event later on
            #
            hwprofile_nodes[node.hardwareprofile].append({
                'node': node,
                'previous_state': node.state,
            })

            # mark node deleted
            node.state = state.NODE_STATE_DELETED

        return hwprofile_nodes

    def __get_resource_adapter(self, session: Session,
                               hardwareProfile: HardwareProfileModel):
        """
        Raises:
            OperationFailed
        """
        if not hardwareProfile.resourceadapter:
            raise OperationFailed(
                'Hardware profile [%s] does not have an associated'
                ' resource adapter' % (hardwareProfile.name))

        adapter = resourceAdapterFactory.get_api(
            hardwareProfile.resourceadapter.name)

        adapter.session = session

        return adapter

    def __process_delete_node_result(self, nodeErrorDict):
        # REALLY!?!? Convert a list of Node objects into a list of
        # node names so we can report the list back to the end-user.
        # This needs to be FIXED!
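        # (Illustrative note: nodeErrorDict is keyed by result category,
        #  e.g. {'NodesDeleted': [<node>, ...], ...}; only the
        #  'NodesDeleted' key receives the expanded per-node dicts built
        #  below.)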
        result = {}
        nodes_deleted = []

        for key, nodeList in nodeErrorDict.items():
            result[key] = [dbNode.name for dbNode in nodeList]

            if key == 'NodesDeleted':
                for node in nodeList:
                    node_deleted = {
                        'name': node.name,
                        'hardwareprofile': node.hardwareprofile.name,
                    }

                    if node.softwareprofile:
                        node_deleted['softwareprofile'] = \
                            node.softwareprofile.name

                    nodes_deleted.append(node_deleted)

        return result, nodes_deleted

    def __post_delete(self, kitmgr: KitActionsManager, node: dict):
        """Call the post-delete kit action for the deleted node and clean
        up node state files (i.e. Puppet certificate, etc.).

        'node' is a dict representing the deleted node.
        """
        kitmgr.post_delete_host(
            node['hardwareprofile']['name'],
            node['softwareprofile']['name']
            if node['softwareprofile'] else None,
            nodes=[node['name']])

        # remove the Puppet cert, etc.
        self.__cleanup_node_state_files(node)

    def __cleanup_node_state_files(self, node_dict: dict):
        # Remove the Puppet cert
        self._bhm.deletePuppetNodeCert(node_dict['name'])

        self._bhm.nodeCleanup(node_dict['name'])

    def __scheduleUpdate(self):
        self._syncApi.scheduleClusterUpdate()

    def getInstallerNode(self, session,
                         optionDict: Optional[OptionDict] = None):
        return self._nodeDbApi.getNode(
            session, self._cm.getInstaller(),
            optionDict=get_default_relations(optionDict))

    def getProvisioningInfo(self, session: Session, nodeName):
        return self._nodeDbApi.getProvisioningInfo(session, nodeName)

    def startupNode(self, session, nodespec: str,
                    remainingNodeList: List[NodeModel] = None,
                    bootMethod: str = 'n') -> None:
        """
        Raises:
            NodeNotFound
        """
        try:
            nodes = self._nodesDbHandler.expand_nodespec(session,
                                                         nodespec)

            if not nodes:
                raise NodeNotFound(
                    'No matching nodes for nodespec [%s]' % (nodespec))

            # Break the list of nodes into a dict keyed on hardware
            # profile
            nodes_dict = self.__processNodeList(nodes)

            for dbHardwareProfile, detailsDict in nodes_dict.items():
                # Get the resource adapter
                adapter = self.__getResourceAdapter(session,
                                                    dbHardwareProfile)

                # Call the startup action extension
                adapter.startupNode(
                    detailsDict['nodes'],
                    remainingNodeList=remainingNodeList or [],
                    tmpBootMethod=bootMethod)

            session.commit()
        except TortugaException:
            session.rollback()
            raise
        except Exception as ex:
            session.rollback()
            self._logger.exception(str(ex))
            raise

    def shutdownNode(self, session, nodespec: str,
                     bSoftShutdown: bool = False) -> None:
        """
        Raises:
            NodeNotFound
        """
        try:
            nodes = self._nodesDbHandler.expand_nodespec(session,
                                                         nodespec)

            if not nodes:
                raise NodeNotFound(
                    'No matching nodes for nodespec [%s]' % (nodespec))

            d = self.__processNodeList(nodes)

            for dbHardwareProfile, detailsDict in d.items():
                # Get the resource adapter
                adapter = self.__getResourceAdapter(session,
                                                    dbHardwareProfile)

                # Call the shutdown action extension
                adapter.shutdownNode(detailsDict['nodes'], bSoftShutdown)

            session.commit()
        except TortugaException:
            session.rollback()
            raise
        except Exception as ex:
            session.rollback()
            self._logger.exception(str(ex))
            raise

    def rebootNode(self, session, nodespec: str,
                   bSoftReset: bool = False,
                   bReinstall: bool = False) -> None:
        """
        Raises:
            NodeNotFound
        """
        nodes = self._nodesDbHandler.expand_nodespec(session, nodespec)

        if not nodes:
            raise NodeNotFound(
                'No nodes matching nodespec [%s]' % (nodespec))

        if bReinstall:
            for dbNode in nodes:
                self._bhm.setNodeForNetworkBoot(session, dbNode)

        # Iterate over the hardware profile/nodes dict to reboot each
        # node
        for dbHardwareProfile, detailsDict in \
                self.__processNodeList(nodes).items():
            adapter = self.__getResourceAdapter(session,
                                                dbHardwareProfile)

            # Call the reboot action extension
            adapter.rebootNode(detailsDict['nodes'], bSoftReset)

        session.commit()

    def getNodesByNodeState(self, session, node_state: str,
                            optionDict: Optional[OptionDict] = None) \
            -> TortugaObjectList:
        """
        Get nodes by state
        """
        return self.__populate_nodes(
            session,
            self._nodeDbApi.getNodesByNodeState(
                session, node_state,
                optionDict=get_default_relations(optionDict)))

    def getNodesByNameFilter(self, session, nodespec: str,
                             optionDict: OptionDict = None,
                             include_installer: Optional[bool] = True) \
            -> TortugaObjectList:
        """
        Return a TortugaObjectList of Node objects matching the nodespec
        """
        return self.__populate_nodes(
            session,
            self._nodeDbApi.getNodesByNameFilter(
                session, nodespec,
                optionDict=get_default_relations(optionDict),
                include_installer=include_installer))

    def getNodesByAddHostSession(self, session, addHostSession: str,
                                 optionDict: OptionDict = None) \
            -> TortugaObjectList:
        """
        Return a TortugaObjectList of Node objects matching the add host
        session
        """
        return self.__populate_nodes(
            session,
            self._nodeDbApi.getNodesByAddHostSession(
                session, addHostSession,
                optionDict=get_default_relations(optionDict)))

    def __processNodeList(self, dbNodes: List[NodeModel]) \
            -> Dict[HardwareProfileModel, Dict[str, list]]:
        """
        Returns a dict indexed by hardware profile, each entry containing
        the list of nodes in that hardware profile
        """
        d: Dict[HardwareProfileModel, Dict[str, list]] = {}

        for dbNode in dbNodes:
            if dbNode.hardwareprofile not in d:
                d[dbNode.hardwareprofile] = {
                    'nodes': [],
                }

            d[dbNode.hardwareprofile]['nodes'].append(dbNode)

        return d

    def __getResourceAdapter(self, session: Session,
                             hardwareProfile: HardwareProfileModel) \
            -> ResourceAdapter:
        """
        Raises:
            OperationFailed
        """
        if not hardwareProfile.resourceadapter:
            raise OperationFailed(
                'Hardware profile [%s] does not have an associated'
                ' resource adapter' % (hardwareProfile.name))

        adapter = resourceAdapterFactory.get_api(
            hardwareProfile.resourceadapter.name)

        adapter.session = session

        return adapter
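As a usage sketch, the nodespec-based operations compose as shown below. How the session is obtained, the 'compute-*' nodespec, and the 'Installed' state string are assumptions for illustration, not fixed Tortuga APIs:

# Hypothetical driver for illustration only.
def example_reboot_and_mark(session):
    mgr = NodeManager()

    # rebootNode() expands the wildcard nodespec, groups the matching
    # nodes by hardware profile, and invokes each profile's resource
    # adapter once with the soft-reset flag.
    mgr.rebootNode(session, 'compute-*', bSoftReset=True)

    # updateNodeStatus() returns True only if 'node_state' or 'bootFrom'
    # actually changed; a no-op call still bumps the 'lastUpdated'
    # timestamp.
    changed = mgr.updateNodeStatus(session, 'compute-01',
                                   node_state='Installed')
    return changed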