def __init__(self):
    """Initialize the DB API and the table handlers it delegates to."""
    TortugaDbApi.__init__(self)

    # Handlers for the database tables this API operates on
    self._softwareProfilesDbHandler = SoftwareProfilesDbHandler()
    self._nodesDbHandler = NodesDbHandler()
    self._globalParametersDbHandler = GlobalParametersDbHandler()
    self._adminsDbHandler = AdminsDbHandler()
    self._osDbHandler = OperatingSystemsDbHandler()
def __init__(self):
    """
    Initialize the node manager: DB API, configuration manager,
    OS-specific boot-host manager, DB handler, add-host manager and
    logger.
    """
    super(NodeManager, self).__init__()

    self._nodeDbApi = NodeDbApi()
    self._cm = ConfigManager()
    # OS-specific boot host manager (PXE/boot configuration helper)
    self._bhm = osUtility.getOsObjectFactory().getOsBootHostManager(
        self._cm)
    self._nodesDbHandler = NodesDbHandler()
    self._addHostManager = AddHostManager()
    self._logger = logging.getLogger(NODE_NAMESPACE)
def __init__(self):
    """Initialize the DB API and the table handlers it delegates to."""
    TortugaDbApi.__init__(self)

    # Handlers for the database tables this API operates on
    self._hardwareProfilesDbHandler = HardwareProfilesDbHandler()
    self._nodesDbHandler = NodesDbHandler()
    self._globalParametersDbHandler = GlobalParametersDbHandler()
    self._adminsDbHandler = AdminsDbHandler()
    self._nicsDbHandler = NicsDbHandler()
    self._resourceAdaptersDbHandler = ResourceAdaptersDbHandler()
    self._networkDevicesDbHandler = NetworkDevicesDbHandler()
    self._networksDbHandler = NetworksDbHandler()
def idleNode(self, nodespec):
    """
    Idle all nodes matching 'nodespec'.

    Returns:
        dict keyed by result category (e.g. 'success') whose values
        are lists of node names

    Raises:
        NodeNotFound
    """
    session = DbManager().openSession()

    try:
        nodes = self.__expand_nodespec(session, nodespec)
        if not nodes:
            raise NodeNotFound(
                'No nodes matching nodespec [%s]' % (nodespec))

        result = NodesDbHandler().idleNode(session, nodes)

        # Convert list of Nodes to list of node names for providing
        # user feedback.
        result_dict = {
            key: [dbNode.name for dbNode in dbNodes]
            for key, dbNodes in result.items()
        }

        session.commit()

        # Remove Puppet certificate(s) for idled node(s).  The boot
        # host manager is loop-invariant, so construct it once.
        bhm = osUtility.getOsObjectFactory().getOsBootHostManager()
        for node_name in result_dict.get('success', []):
            bhm.deletePuppetNodeCert(node_name)

        # Schedule a cluster update
        self.__scheduleUpdate()

        return result_dict
    except TortugaException:
        # Known application error: roll back and propagate unchanged
        session.rollback()
        raise
    except Exception as ex:
        session.rollback()
        self.getLogger().exception('[%s] %s' % (self.__class__.__name__, ex))
        raise
    finally:
        DbManager().closeSession()
def rebootNode(self, nodespec, bSoftReset=False, bReinstall=False):
    """
    Reboot all nodes matching 'nodespec'; when 'bReinstall' is set,
    flag each node for network boot first so the reboot triggers a
    reinstall.

    Raises:
        NodeNotFound
    """
    session = DbManager().openSession()

    try:
        nodes = self.__expand_nodespec(session, nodespec)
        if not nodes:
            raise NodeNotFound(
                'No nodes matching nodespec [%s]' % (nodespec))

        if bReinstall:
            # Only needed when reinstalling; avoid constructing the
            # boot host manager otherwise
            bhm = osUtility.getOsObjectFactory().getOsBootHostManager()

            for dbNode in nodes:
                bhm.setNodeForNetworkBoot(dbNode)

        results = NodesDbHandler().rebootNode(session, nodes, bSoftReset)

        session.commit()

        return results
    except Exception:
        # Keep the session consistent on failure; matches the error
        # handling used by sibling operations (e.g. idleNode())
        session.rollback()
        raise
    finally:
        DbManager().closeSession()
def test_getNodesByTags_match_multiple(self):
    """Searching on multiple tag keys (values ignored) matches all nodes."""
    search_tags = {'tag1': None, 'key2': None}

    matched = NodesDbHandler().getNodesByTags(self.session, search_tags)

    assert match_all_nodes(matched)
def updateNodeStatus(self, nodeName, state=None, bootFrom=None):
    """Update node status

    If both 'state' and 'bootFrom' are None, this operation will
    update only the 'lastUpdated' timestamp.

    Returns:
        bool indicating whether state and/or bootFrom differed from
        current value
    """
    # Human-readable rendering of 'bootFrom' used only for logging
    value = 'None' if bootFrom is None else \
        '1 (disk)' if int(bootFrom) == 1 else '0 (network)'

    self.getLogger().debug(
        'updateNodeStatus(): node=[%s], state=[%s], bootFrom=[%s]' % (
            nodeName, state, value))

    session = DbManager().openSession()

    try:
        node = NodesDbHandler().getNode(session, nodeName)

        result = self._updateNodeStatus(node, state=state,
                                        bootFrom=bootFrom)

        session.commit()

        return result
    finally:
        DbManager().closeSession()
def test_getNodesByTags_one_match_one_nomatch(self):
    """One matching tag alongside a non-existent one still matches all."""
    search_tags = {'tag1': 'value1', 'nomatch': None}

    matched = NodesDbHandler().getNodesByTags(self.session, search_tags)

    assert match_all_nodes(matched)
def test_getNodesByTag_non_contiguous(self):
    """'tag5' matches a non-contiguous set of compute nodes."""
    result = NodesDbHandler().getNodesByTags(self.session, {'tag5': None})

    expected = {
        'compute-01.private',
        'compute-02.private',
        'compute-08.private',
    }
    actual = {node.name for node in result}

    # every expected node must appear in the result
    assert expected.issubset(actual)
def transferNode(self, nodespec, dstSoftwareProfileName, bForce=False):
    """
    Transfer nodes defined by 'nodespec' to 'dstSoftwareProfile'

    Raises:
        NodeNotFound
        SoftwareProfileNotFound
        NodeTransferNotValid
    """
    session = DbManager().openSession()

    try:
        nodes = self.__expand_nodespec(session, nodespec)

        if not nodes:
            raise NodeNotFound(
                'No nodes matching nodespec [%s]' % (nodespec))

        # Look up the destination software profile; raises
        # SoftwareProfileNotFound if it does not exist
        dbDstSoftwareProfile = \
            SoftwareProfilesDbHandler().getSoftwareProfile(
                session, dstSoftwareProfileName)

        transfer_results = NodesDbHandler().transferNode(
            session, nodes, dbDstSoftwareProfile, bForce=bForce)

        # Shared post-transfer processing (also used by other transfer
        # entry points)
        return self.__transferNodeCommon(
            session, dbDstSoftwareProfile, transfer_results)
    finally:
        DbManager().closeSession()
def test_getNodesByTags_match_multiple_with_values(self):
    """Searching on multiple key/value pairs matches all nodes."""
    search_tags = {'tag1': 'value1', 'tag2': 'value2'}

    matched = NodesDbHandler().getNodesByTags(self.session, search_tags)

    assert match_all_nodes(matched)
def _delete_host_action(self, hardware_profile_name,
                        software_profile_name, action_name, *args,
                        **kwargs):
    """
    Run the delete-host action across all enabled component
    installers, folding in any nodes still marked 'Deleted' in the
    database.

    :param hardware_profile_name: hardware profile name (may be None)
    :param software_profile_name: software profile name (may be None)
    :param action_name: name of the action being run (for logging and
        dispatch)
    """
    logger.debug('{}: {}, {}, {}, {}'.format(
        action_name, hardware_profile_name, software_profile_name,
        args, kwargs))

    component_installers = self._get_enabled_component_installers(
        self._get_all_component_installers(base_kit_order='last'))

    from tortuga.db.dbManager import DbManager

    session = DbManager().openSession()

    #
    # Get all nodes marked as 'Deleted' that may still exist in the
    # database. We shouldn't have to do this, but occasionally a
    # transient condition (ie. unable to connect to hypervisor) occurs
    # and the delete node operation fails.
    #
    from tortuga.db.nodesDbHandler import NodesDbHandler

    nodes_db = NodesDbHandler()

    try:
        if software_profile_name:
            nodes = nodes_db.getNodeListByNodeStateAndSoftwareProfileName(
                session, 'Deleted', software_profile_name)
        else:
            nodes = nodes_db.getNodesByNodeState(session, 'Deleted')
    finally:
        DbManager().closeSession()

    # Names of lingering 'Deleted' nodes
    deleted_node_names = [node.name for node in nodes]

    if 'nodes' in kwargs:
        aggregated_nodes = list(
            set(kwargs.pop('nodes')) | set(deleted_node_names))
    else:
        # BUGFIX: previously the raw Node objects were used here,
        # while the branch above builds a list of node *names*;
        # normalize to names so downstream sees a consistent payload
        aggregated_nodes = deleted_node_names

    #
    # hardware profile currently undefined for delete_host() action,
    # so pass None as hardware profile name
    #
    self._run_action_with_node_list(
        component_installers, hardware_profile_name,
        software_profile_name, aggregated_nodes, action_name,
        *args, **kwargs)
def runCommand(self):
    """
    Delete a provisioning NIC (and its associated network and network
    device) from the installer's hardware profile, then optionally
    re-sync the cluster.
    """
    self.parseArgs()

    with DbManager().session() as session:
        # Installer node record
        dbNode = NodesDbHandler().getNode(session, self._cm.getInstaller())

        # Validate device name
        NetworkDevicesDbHandler().getNetworkDevice(session, self.getArgs().nic)

        # Ensure it is a provisioning NIC that is being deleted.
        # The for/else raises only when no NIC name matched (no break).
        dbInstallerNic: Nic = None

        for dbInstallerNic in dbNode.hardwareprofile.nics:
            if dbInstallerNic.networkdevice.name == self.getArgs().nic:
                break
        else:
            raise NicNotFound('NIC [%s] is not a provisioning NIC' % (
                self.getArgs().nic))

        # Any *other* hardware profiles still using this NIC's network
        # block the deletion
        hardwareProfiles = [
            entry.hardwareprofile
            for entry in dbInstallerNic.network.hardwareprofilenetworks
            if entry.hardwareprofile != dbNode.hardwareprofile
        ]

        if hardwareProfiles:
            raise Exception('Hardware profile(s) are associated with this'
                            ' provisioning NIC: [%s]' % (
                                ' '.join([hp.name for hp in hardwareProfiles])))

        # Remove association rows first, then the NIC itself, then the
        # now-orphaned network and network device (order matters for FKs)
        session.query(
            HardwareProfileNetwork).filter(
                HardwareProfileNetwork.network == dbInstallerNic.network).\
            delete()

        session.query(HardwareProfileProvisioningNic).filter(
            HardwareProfileProvisioningNic.nic == dbInstallerNic).delete()

        # Capture ids before the NIC row is deleted
        dbNetworkId = dbInstallerNic.network.id
        networkDeviceId = dbInstallerNic.networkdevice.id

        session.delete(dbInstallerNic)

        session.query(Network).filter(Network.id == dbNetworkId).delete()

        self._deleteNetworkDevice(session, networkDeviceId)

        session.commit()

        # Rewrite local network configuration to reflect the removal
        bUpdated = self._updateNetworkConfig(session, dbNode)

        if bUpdated and self.getArgs().bSync:
            print('Applying changes to Tortuga...')

            cmd = 'puppet agent --onetime --no-daemonize >/dev/null 2>&1'
            tortugaSubprocess.executeCommandAndIgnoreFailure(cmd)
def test_get_installer_node(self):
    """
    Test get_installer_node against test harness
    """
    installer = NodesDbHandler().get_installer_node(self.session)

    assert installer is not None
    assert installer.softwareprofile.type == 'installer'
def test_getNodesByNodeState(dbm, state):
    """Nodes must exist in each parametrized state."""
    with dbm.session() as session:
        matched = NodesDbHandler().getNodesByNodeState(session, state)

        # Raise (rather than assert) for a clearer failure message
        if not matched:
            raise NodeNotFound('No nodes in state [{}]'.format(state))

        assert matched
def __expand_nodespec(self, session, nodespec):  # pylint: disable=no-self-use
    """
    Expand wildcards in 'nodespec' into the matching node records.
    Each token in the nodespec can be a wildcard that expands into one
    or more nodes.
    """
    filterspec = self.build_node_filterspec(nodespec)

    return NodesDbHandler().getNodesByNameFilter(session, filterspec)
def test_expand_nodespec_default(dbm):
    """'*' expands to a list that includes the installer by default."""
    installer = getfqdn()

    with dbm.session() as session:
        nodes = NodesDbHandler().expand_nodespec(session, '*')

    # the default in 'expand_nodespec' is to include the installer
    assert nodes
    assert isinstance(nodes, list)
    assert installer in [node.name for node in nodes]
def test_getNodesByNameFilter_default(dbm):
    """'%' name filter returns a list that includes the installer."""
    installer = getfqdn()

    with dbm.session() as session:
        nodes = NodesDbHandler().getNodesByNameFilter(session, '%')

    assert nodes
    assert isinstance(nodes, list)
    assert installer in [node.name for node in nodes]
def test_getNodesByTags_nonexistent(self):
    """A tag that was never created matches nothing."""
    tags = get_tags()
    nodes = get_nodes()

    populate(self.session, tags, nodes)

    matched = NodesDbHandler().getNodesByTags(
        self.session, [('invalid_tag', )])

    assert not matched
def _get_nodes_from_nodespec(self, sess: Session,
                             nodespec: str) -> List[Node]:
    """
    Given a node spec, return the list of nodes matching the spec.

    :param Session sess: a database session
    :param str nodespec: a node spec

    :return List[Node]: a list of nodes matching the spec (empty list
        when nothing matches)
    """
    matched = NodesDbHandler().expand_nodespec(
        sess, nodespec, include_installer=False)

    return matched if matched else []
def __is_duplicate_mac(self, mac, session_nodes):
    """
    Return True if 'mac' already appears among 'session_nodes' or
    belongs to an existing node in the database.
    """
    if self.__is_duplicate_mac_in_session(mac, session_nodes):
        return True

    try:
        NodesDbHandler().getNodeByMac(self.session, mac)
    except NodeNotFound:
        # No existing node owns this MAC
        return False

    return True
def test_getNodesByNameFilter_without_installer(dbm):
    """With include_installer=False the installer must be excluded."""
    installer = getfqdn()

    with dbm.session() as session:
        nodes = NodesDbHandler().getNodesByNameFilter(
            session, '%', include_installer=False)

    assert nodes
    assert isinstance(nodes, list)
    assert installer not in [node.name for node in nodes]
def updateNode(self, nodeName, updateNodeRequest):
    """
    Apply 'updateNodeRequest' (nics/state changes) to the named node.

    Raises whatever the underlying DB operations raise; the session is
    rolled back on any failure.
    """
    self.getLogger().debug('updateNode(): name=[{0}]'.format(nodeName))

    session = DbManager().openSession()

    try:
        node = NodesDbHandler().getNode(session, nodeName)

        if 'nics' in updateNodeRequest:
            # Only the first NIC's IP is honoured; mark it bootable
            nic = updateNodeRequest['nics'][0]

            if 'ip' in nic:
                node.nics[0].ip = nic['ip']
                node.nics[0].boot = True

        # Call resource adapter
        NodesDbHandler().updateNode(session, node, updateNodeRequest)

        run_post_install = False

        if 'state' in updateNodeRequest:
            # Post-install actions fire only on the
            # Allocated -> Provisioned transition
            run_post_install = node.state == 'Allocated' and \
                updateNodeRequest['state'] == 'Provisioned'

            node.state = updateNodeRequest['state']

        session.commit()

        if run_post_install:
            self.getLogger().debug(
                'updateNode(): run-post-install for node [{0}]'.format(
                    node.name))

            self.__scheduleUpdate()
    except Exception:
        session.rollback()

        self.getLogger().exception(
            'Exception updating node [{0}]'.format(nodeName))

        # BUGFIX: re-raise so callers can detect the failed update;
        # previously the exception was logged and silently swallowed
        raise
    finally:
        DbManager().closeSession()
def __init__(self, kit_installer):
    """
    Initialize kit component: OS service manager, software profile
    API, node DB handler, installer hostname and logger.
    """
    super().__init__(kit_installer)

    os_object_factory = osUtility.getOsObjectFactory()
    self._os_service_manager = os_object_factory.getOsServiceManager()
    self._software_profile_db_api = SoftwareProfileDbApi()
    # self._uge_mgmt_api = Uge_mgmt_api()
    self._node_api = NodesDbHandler()
    self.installer_hostname = socket.getfqdn()
    self._logger = logging.getLogger('{}.{}'.format(
        KIT_NAMESPACE, kit_installer.name))
def test_getNodesByTags_value_mismatch(self):
    """A known tag key with the wrong value matches nothing."""
    tags = get_tags()
    nodes = get_nodes()

    populate(self.session, tags, nodes)

    search = [(tags[0].name, 'invalid_value', )]
    matched = NodesDbHandler().getNodesByTags(self.session, search)

    assert not matched
def __init__(self):
    """
    Initialize the DB API and its table handlers.

    Handler classes are imported locally (presumably to avoid a
    circular import at module load time — confirm against the module's
    import graph).
    """
    TortugaDbApi.__init__(self)

    from tortuga.db.hardwareProfilesDbHandler \
        import HardwareProfilesDbHandler
    self._hardwareProfilesDbHandler = HardwareProfilesDbHandler()

    from tortuga.db.nodesDbHandler import NodesDbHandler
    from tortuga.db.nicsDbHandler import NicsDbHandler
    from tortuga.db.networksDbHandler import NetworksDbHandler

    self._nodesDbHandler = NodesDbHandler()
    self._nicsDbHandler = NicsDbHandler()
    self._networksDbHandler = NetworksDbHandler()
def test_getPXEReinstallSnippet(self):
    """The PXE reinstall snippet must embed the kickstart URL."""
    os_family = OsFamilyInfo('rhel', '7', 'x86_64')
    os_support = OSSupport(os_family)

    ks_url = 'http://ksurl'

    with self.dbm.session() as session:
        node = NodesDbHandler().getNode(session, 'compute-01.private')

        snippet = os_support.getPXEReinstallSnippet(ks_url, node)

    assert snippet
    assert ks_url in snippet
def test_getNodeListByNodeStateAndSoftwareProfileName(dbm, state, swprofile):
    """Nodes must exist for each (state, software profile) pair."""
    with dbm.session() as session:
        result = NodesDbHandler().getNodeListByNodeStateAndSoftwareProfileName(
            session,
            state,
            swprofile,
        )

        if not result:
            # BUGFIX: format args were swapped (state was printed as
            # the profile name and vice versa)
            raise NodeNotFound(
                'No nodes in software profile [{}] in state [{}]'.format(
                    swprofile, state))

        assert result
def test_getNodesByTags_value_match(self):
    """An exact key/value match returns the first four tagged nodes."""
    tags = get_tags()
    nodes = get_nodes()

    populate(self.session, tags, nodes)

    matched = NodesDbHandler().getNodesByTags(
        self.session, [(tags[0].name, tags[0].value)])

    assert matched

    # each of the first four populated nodes must be present
    for expected_node in nodes[:4]:
        assert expected_node in matched
def __is_duplicate_mac(self, mac, session_nodes):
    """
    Return True if 'mac' already appears among 'session_nodes' or
    belongs to an existing node in the database.

    Opens (and always closes) its own database session.
    """
    if self.__is_duplicate_mac_in_session(mac, session_nodes):
        return True

    session = DbManager().openSession()

    try:
        NodesDbHandler().getNodeByMac(session, mac)
    except NodeNotFound:
        # No existing node owns this MAC
        return False
    finally:
        DbManager().closeSession()

    return True