def postAddHost(self, session: Session, hardwareProfileName: str,
                softwareProfileName: Optional[str],
                addHostSession: str) -> None:
    """Run kit-level post-add-host actions for a completed add-host
    session, then request a cluster-wide update.
    """
    self._logger.debug(
        'postAddHost(): hardwareProfileName=[%s]'
        ' softwareProfileName=[%s] addHostSession=[%s]' % (
            hardwareProfileName, softwareProfileName, addHostSession))

    # NOTE: the caller already holds the Node (db) objects for this
    # add-host session; re-querying here is redundant but retained.
    from tortuga.node.nodeApi import NodeApi

    session_nodes = NodeApi().getNodesByAddHostSession(
        session, addHostSession)

    kit_actions = KitActionsManager()
    kit_actions.session = session
    kit_actions.post_add_host(
        hardwareProfileName, softwareProfileName, session_nodes)

    # Always go over the web service for this call.
    SyncWsApi().scheduleClusterUpdate(updateReason='Node(s) added')
def __pre_delete_nodes(self, kitmgr: KitActionsManager,
                       nodes: List[NodeModel]) \
        -> DefaultDict[HardwareProfileModel, List[NodeModel]]:
    """Run the pre-delete kit action for each node, capture each node's
    prior state, and mark the node as deleted.

    :return: nodes (paired with their previous state) grouped by
             hardware profile, used to fire deletion events later.
    """
    by_hwprofile = defaultdict(list)

    for node in nodes:
        # kit hooks run before the state change so plugins still see
        # the node in its pre-deletion state
        kitmgr.pre_delete_host(
            node.hardwareprofile.name,
            get_node_swprofile_name(node),
            nodes=[node.name])

        # remember previous state for the event payload
        by_hwprofile[node.hardwareprofile].append({
            'node': node,
            'previous_state': node.state,
        })

        # mark node deleted in the database
        node.state = state.NODE_STATE_DELETED

    return by_hwprofile
def __postDeleteHost(self, nodes_deleted):
    """Run the post-delete-host kit action for each deleted node.

    'nodes_deleted' is a list of dicts of the following format:

        {
            'name': 'compute-01',
            'softwareprofile': 'Compute',
            'hardwareprofile': 'LocalIron',
        }

    If the node does not have an associated software profile, the
    dict does not contain the key 'softwareprofile'.
    """
    self.getLogger().debug('__postDeleteHost(): nodes_deleted=[%s]' % (nodes_deleted))

    if not nodes_deleted:
        self.getLogger().debug('No nodes deleted in this operation')
        return

    kitmgr = KitActionsManager()

    for node_dict in nodes_deleted:
        # dict.get() returns None when 'softwareprofile' is absent,
        # replacing the verbose "x if k in d else None" double lookup
        kitmgr.post_delete_host(
            node_dict['hardwareprofile'],
            node_dict.get('softwareprofile'),
            nodes=[node_dict['name']])
def __preDeleteHost(self, kitmgr: KitActionsManager, nodes):
    """Invoke the pre-delete-host kit action for every node in *nodes*."""
    self._logger.debug('__preDeleteHost(): nodes=[%s]' % (
        ' '.join(node.name for node in nodes)))

    for node in nodes:
        swprofile_name = \
            node.softwareprofile.name if node.softwareprofile else None

        kitmgr.pre_delete_host(
            node.hardwareprofile.name, swprofile_name, nodes=[node.name])
def __post_delete(self, kitmgr: KitActionsManager, node: dict):
    """Run the post-delete kit action for a deleted node, then remove
    its on-disk state (ie. Puppet certificate, etc.).

    'node' is a JSON-style dict representing the deleted node.
    """
    swprofile = node['softwareprofile']

    kitmgr.post_delete_host(
        node['hardwareprofile']['name'],
        swprofile['name'] if swprofile else None,
        nodes=[node['name']])

    # remove Puppet cert, etc.
    self.__cleanup_node_state_files(node)
def postAddHost(self, hardwareProfileName, softwareProfileName,
                addHostSession):
    """Run kit post-add-host actions, then schedule a cluster update."""
    self.getLogger().debug(
        'postAddHost(): hardwareProfileName=[%s]'
        ' softwareProfileName=[%s] addHostSession=[%s]' %
        (hardwareProfileName, softwareProfileName, addHostSession))

    kit_actions = KitActionsManager()
    kit_actions.post_add_host(
        hardwareProfileName, softwareProfileName, addHostSession)

    # Always go over the web service for this call.
    SyncWsApi().scheduleClusterUpdate(updateReason='Node(s) added')
def __preDeleteHost(self, nodes):
    """Run the pre-delete-host kit action for the given nodes; no-op
    when the node list is empty.
    """
    self.getLogger().debug('__preDeleteHost(): nodes=[%s]' % (
        ' '.join(node.name for node in nodes)))

    if not nodes:
        self.getLogger().debug('No nodes deleted in this operation')
        return

    kitmgr = KitActionsManager()

    for node in nodes:
        swprofile_name = \
            node.softwareprofile.name if node.softwareprofile else None

        kitmgr.pre_delete_host(
            node.hardwareprofile.name, swprofile_name, nodes=[node.name])
def deleteNode(self, session, nodespec: str, force: bool = False):
    """Delete nodes matching *nodespec*.

    :raises NodeNotFound: no matching nodes for nodespec
    """
    try:
        matched = self.__get_nodes_for_deletion(session, nodespec, force)

        kitmgr = KitActionsManager()
        kitmgr.session = session

        # perform actual node deletion
        self.__delete_nodes(session, kitmgr, matched)
    except Exception:
        # undo any partial work before propagating
        session.rollback()

        raise
def deleteNode(self, session: Session, nodespec: str, force: bool = False):
    """
    Delete nodes matching a node spec.

    :param Session session: a database session
    :param str nodespec: a node spec
    :param bool force: whether or not this is a force operation
    """
    # NOTE(review): 'force' is accepted here but not forwarded to
    # __get_nodes_for_deletion, unlike a sibling implementation that
    # passes it through -- confirm whether that is intentional.
    try:
        matched = self.__get_nodes_for_deletion(session, nodespec)

        kitmgr = KitActionsManager()
        kitmgr.session = session

        self.__delete_nodes(session, kitmgr, matched)
    except Exception:
        # undo any partial work before propagating
        session.rollback()

        raise
def deleteNode(self, session, nodespec: str, force: bool = False):
    """
    Delete node by nodespec.

    Expands the nodespec, runs pre-delete kit actions, deletes the
    nodes, commits, then performs post-deletion cleanup (kit actions,
    add-host session removal, Puppet cert cleanup) and schedules a
    cluster update. On any failure the session is rolled back.

    Raises:
        NodeNotFound
    """
    kitmgr = KitActionsManager()
    kitmgr.session = session

    try:
        # installer node is never eligible for deletion
        nodes = self._nodesDbHandler.expand_nodespec(
            session, nodespec, include_installer=False)
        if not nodes:
            raise NodeNotFound(
                'No nodes matching nodespec [%s]' % (nodespec))

        self.__validate_delete_nodes_request(nodes, force)

        # kit pre-delete hooks run while nodes are still "live"
        self.__preDeleteHost(kitmgr, nodes)

        nodeErrorDict = self.__delete_node(session, nodes)

        # TODO: __process_nodeErrorDict converts Node objects into
        # plain node-name dicts purely for end-user reporting; this
        # conversion should be pushed down or removed.
        result, nodes_deleted = self.__process_nodeErrorDict(
            nodeErrorDict)

        session.commit()

        # ============================================================
        # Perform actions *after* node deletion(s) have been committed
        # to database.
        # ============================================================

        self.__postDeleteHost(kitmgr, nodes_deleted)

        # collapse to unique add-host sessions before deleting them
        addHostSessions = set(
            [tmpnode['addHostSession'] for tmpnode in nodes_deleted])

        if addHostSessions:
            self._addHostManager.delete_sessions(addHostSessions)

        for nodeName in result['NodesDeleted']:
            # Remove the Puppet cert
            self._bhm.deletePuppetNodeCert(nodeName)

            self._bhm.nodeCleanup(nodeName)

            self._logger.info('Node [%s] deleted' % (nodeName))

        # Schedule a cluster update
        self.__scheduleUpdate()

        return result
    except Exception:
        session.rollback()

        raise
def __transferNodeCommon(self, session, dbDstSoftwareProfile,
                         results):  # pylint: disable=no-self-use
    """Finish a node transfer: group transferred nodes by hardware
    profile, invoke each resource adapter once per profile, and build
    the per-software-profile added/removed map used to refresh kit
    components.

    'results' is a list of dicts, each containing at least 'node' (the
    transferred node) and 'prev_softwareprofile' (its software profile
    before the transfer). Returns 'results' unchanged.
    """
    # Aggregate list of transferred nodes based on hardware profile
    # to call resource adapter minimal number of times.
    hwProfileMap = {}

    for transferResultDict in results:
        dbNode = transferResultDict['node']
        dbHardwareProfile = dbNode.hardwareprofile

        if dbHardwareProfile not in hwProfileMap:
            hwProfileMap[dbHardwareProfile] = [transferResultDict]
        else:
            hwProfileMap[dbHardwareProfile].append(transferResultDict)

    session.commit()

    # maps software profile name -> {'added': [...], 'removed': [...]}
    nodeTransferDict = {}

    # Kill two birds with one stone... do the resource adapter
    # action as well as populate the nodeTransferDict. This saves
    # having to iterate twice on the same result data.
    for dbHardwareProfile, nodesDict in hwProfileMap.items():
        adapter = resourceAdapterFactory.getApi(
            dbHardwareProfile.resourceadapter.name)

        dbNodeTuples = []

        for nodeDict in nodesDict:
            dbNode = nodeDict['node']
            dbSrcSoftwareProfile = nodeDict['prev_softwareprofile']

            # record the node as removed from its source profile...
            if dbSrcSoftwareProfile.name not in nodeTransferDict:
                nodeTransferDict[dbSrcSoftwareProfile.name] = {
                    'added': [],
                    'removed': [dbNode],
                }
            else:
                nodeTransferDict[dbSrcSoftwareProfile.name]['removed'].\
                    append(dbNode)

            # ...and as added to the destination profile
            if dbDstSoftwareProfile.name not in nodeTransferDict:
                nodeTransferDict[dbDstSoftwareProfile.name] = {
                    'added': [dbNode],
                    'removed': [],
                }
            else:
                nodeTransferDict[dbDstSoftwareProfile.name]['added'].\
                    append(dbNode)

            # The destination software profile is available through
            # node relationship.
            dbNodeTuples.append((dbNode, dbSrcSoftwareProfile))

        adapter.transferNode(dbNodeTuples, dbDstSoftwareProfile)

        session.commit()

    # Now call the 'refresh' action to all participatory components
    KitActionsManager().refresh(nodeTransferDict)

    return results