def __init__(self):
    """Initialize DB API handles and helpers used by the node manager."""
    # Py3 zero-argument super() replaces legacy super(NodeManager, self).
    super().__init__()
    self._nodeDbApi = NodeDbApi()
    self._hardwareProfileDbApi = HardwareProfileDbApi()
    self._cm = ConfigManager()
    self._san = san.San()
def __init__(self):
    """Initialize the DB API handles used by the hardware profile manager."""
    # Py3 zero-argument super() replaces legacy super(HardwareProfileManager, self).
    super().__init__()
    self._hpDbApi = HardwareProfileDbApi()
    self._spDbApi = SoftwareProfileDbApi()
    self._networkDbApi = NetworkDbApi()
    self._globalParameterDbApi = GlobalParameterDbApi()
    self._nodeDbApi = NodeDbApi()
def __init__(self):
    """Initialize DB APIs, config, SAN helper, and the OS boot-host manager."""
    # Py3 zero-argument super() replaces legacy super(NodeManager, self).
    super().__init__()
    self._nodeDbApi = NodeDbApi()
    self._hardwareProfileDbApi = HardwareProfileDbApi()
    self._cm = ConfigManager()
    self._san = san.San()
    # Boot-host manager is resolved once for the detected OS.
    self._bhm = osUtility.getOsObjectFactory().getOsBootHostManager()
def __init__(self):
    """Initialize DB API handles and the namespaced logger."""
    # Py3 zero-argument super() replaces legacy super(HardwareProfileManager, self).
    super().__init__()
    self._hpDbApi = HardwareProfileDbApi()
    self._spDbApi = SoftwareProfileDbApi()
    self._networkDbApi = NetworkDbApi()
    self._globalParameterDbApi = GlobalParameterDbApi()
    self._nodeDbApi = NodeDbApi()
    self._logger = logging.getLogger(HARDWARE_PROFILE_NAMESPACE)
def test_launchActive():
    """Drive the GCE adapter's private __launchActive path end to end."""
    hw_profile = HardwareProfileDbApi().getHardwareProfile('gce')
    sw_profile = SoftwareProfileDbApi().getSoftwareProfile('BasicCompute')
    adapter = tortuga.resourceAdapter.gce.Gce()
    session = adapter._Gce__initSession(
        {}, swProfile=sw_profile, hwProfile=hw_profile)
    node_create_request = {
        'nodeCount': 5,
        'hardwareProfile': hw_profile,
        'softwareProfile': sw_profile,
        'nodeDetails': [],
        'deviceName': None,
        'rackNumber': 1,
        'resourceUnits': 1,
        'supportedUnits': 1,
    }
    adapter._Gce__launchActive(session, node_create_request)
def test_getHardwareProfile(dbm):
    """
    Get hardware profile with default resource adapter configuration
    defined.
    """
    with dbm.session() as session:
        profile = HardwareProfileDbApi().getHardwareProfile(session, 'aws')
        assert isinstance(profile, HardwareProfile)
        assert profile.getName() == 'aws'
        assert profile.getDefaultResourceAdapterConfig() is None
        assert profile.getResourceAdapter()
def runCommand(self):
    """List nodes eligible for the given software profile, cheapest first."""
    self.parseArgs(_(""" Display list of nodes able to use the specified software profile, ordered by cost. """))
    softwareProfileName = self.getArgs().softwareProfile
    nodeApi = NodeApi()
    softwareUsesHardwareDbApi = SoftwareUsesHardwareDbApi()
    hardwareProfileDbApi = HardwareProfileDbApi()
    load_kits()
    with DbManager().session() as session:
        hwPList = hardwareProfileDbApi.getHardwareProfileList(session)
        # Hardware profiles this software profile is allowed to run on.
        hardwareProfileIdList = softwareUsesHardwareDbApi.\
            getAllowedHardwareProfilesBySoftwareProfileName(
                session, softwareProfileName)
        nodeList = nodeApi.getNodeList(session)
        usableNodes = []
        # Usable == idle node whose hardware profile is in the allowed set.
        for node in nodeList:
            if (node.getHardwareProfile().getId() in hardwareProfileIdList) \
                    and node.getIsIdle():
                usableNodes.append(node)
        costNameList = []
        # Pair each usable node with its hardware profile's cost.
        for node in usableNodes:
            nodeHwP = node.getHardwareProfile().getId()
            for hwP in hwPList:
                if hwP.getId() == nodeHwP:
                    costNameList.append(
                        [int(hwP.getCost()), node.getName()])
                    break
        # Sort by (cost, name); emit names only.
        costNameList.sort()
        for node in costNameList:
            print('%s' % (node[1]))
def test_getHardwareProfile_alt(dbm):
    """
    Get hardware profile with default resource adapter configuration
    defined.
    """
    with dbm.session() as session:
        profile = HardwareProfileDbApi().getHardwareProfile(session, 'aws2')
        assert isinstance(profile, HardwareProfile)
        assert profile.getName() == 'aws2'
        # 'nondefault' is the known default resource adapter configuration
        # profile for the hardware profile 'aws2'
        assert profile.getDefaultResourceAdapterConfig() == 'nondefault'
        assert profile.getResourceAdapter()
def test_getConfig():
    """Dump the GCE adapter's resolved configuration dictionary."""
    hw_profile = HardwareProfileDbApi().getHardwareProfile('gce')
    sw_profile = SoftwareProfileDbApi().getSoftwareProfile('BasicCompute')
    adapter = tortuga.resourceAdapter.gce.Gce()
    resolved = adapter._Gce__getConfig(
        resourceAdapterConfig={}, swProfile=sw_profile, hwProfile=hw_profile)
    pprint.pprint(resolved)
def __init__(self): OsObjectManager.__init__(self) # Cache this for later try: self.passdata = pwd.getpwnam('apache') except KeyError: self.passdata = pwd.getpwnam(os.getenv('USER')) self.hardwareProfileDbApi = HardwareProfileDbApi() self.softwareProfileDbApi = SoftwareProfileDbApi() self._nodeApi = nodeApi.NodeApi() self._cm = ConfigManager()
def test_initSession():
    """Create and return a GCE adapter session for the test profiles."""
    hw_profile = HardwareProfileDbApi().getHardwareProfile('gce')
    sw_profile = SoftwareProfileDbApi().getSoftwareProfile('BasicCompute')
    adapter = tortuga.resourceAdapter.gce.Gce()
    return adapter._Gce__initSession(
        {}, swProfile=sw_profile, hwProfile=hw_profile)
def main():
    """Look up a known Openstack instance via the adapter's private API."""
    hwProfile = HardwareProfileDbApi().getHardwareProfile('rackspace')
    swProfile = SoftwareProfileDbApi().getSoftwareProfile('BasicCompute')
    osAdapter = Openstack()
    session = osAdapter._Openstack__initSession(hwProfile=hwProfile,
                                                swProfile=swProfile)
    # Result intentionally discarded (original bound it to an unused local);
    # the call itself exercises the instance lookup.
    osAdapter._Openstack__getInstance(
        session, '9d232b72-d50b-4018-8b76-febf503b722f')
def test_updateHardwareProfileTags(dbm):
    """Tags set on a hardware profile persist, and can be cleared again."""
    api = HardwareProfileDbApi()
    tags = {'tag1': 'tag1 value', 'tag2': 'tag2 value'}
    with dbm.session() as session:
        hwprofile = api.getHardwareProfile(session, 'notags')
        #
        # Set tags. Pass a copy of 'tags' (the original duplicated the
        # literal, which can silently drift from the asserted value).
        #
        hwprofile.setTags(dict(tags))
        api.updateHardwareProfile(session, hwprofile)
        session.commit()
        hwprofile = api.getHardwareProfile(session, 'notags')
        assert hwprofile.getTags() == tags
        #
        # Remove tags
        #
        hwprofile.setTags({})
        api.updateHardwareProfile(session, hwprofile)
        session.commit()
        hwprofile = api.getHardwareProfile(session, 'notags')
        assert hwprofile.getTags() == {}
def __init__(self, *args, **kwargs):
    """Create DB API handles, then delegate to the base CLI initializer."""
    # NOTE(review): attributes are deliberately assigned before
    # super().__init__() -- confirm the base class does not need them
    # during its own initialization.
    self._hwp_api = HardwareProfileDbApi()
    self._node_api = NodeDbApi()
    self._swp_api = SoftwareProfileDbApi()
    super().__init__(*args, **kwargs)
class UctagCli(TortugaCli):
    """CLI for adding, removing, and listing tags on nodes and profiles."""

    def __init__(self, *args, **kwargs):
        # DB API handles shared by all subcommand handlers.
        self._hwp_api = HardwareProfileDbApi()
        self._node_api = NodeDbApi()
        self._swp_api = SoftwareProfileDbApi()
        super().__init__(*args, **kwargs)

    def parseArgs(self, usage: Optional[str] = None):
        """Register the add/remove/list subcommands and their options."""
        subparsers = self.getParser().add_subparsers(help='sub-command help',
                                                     dest='subparser_name')
        # 'add': merge key=value tags into the selected resources.
        add_subparser = subparsers.add_parser('add')
        add_subparser.add_argument('--node', dest='nodespec')
        add_subparser.add_argument('--software-profile', metavar='NAME')
        add_subparser.add_argument('--hardware-profile', metavar='NAME')
        add_subparser.add_argument('--tags', action='append', dest='tags',
                                   metavar='key=value[,key=value]')
        add_subparser.set_defaults(func=self.add_tag)
        # 'remove': delete tag keys from the selected resources.
        remove_subparser = subparsers.add_parser('remove')
        remove_subparser.add_argument('--node', dest='nodespec')
        remove_subparser.add_argument('--software-profile', metavar='NAME')
        remove_subparser.add_argument('--hardware-profile', metavar='NAME')
        remove_subparser.add_argument('--tags', action='append', dest='tags',
                                      metavar='key[,key]')
        remove_subparser.set_defaults(func=self.remove_tag)
        # 'list': report tags across resource types.
        list_subparser = subparsers.add_parser('list')
        list_subparser.add_argument('--all-resources', action='store_true')
        list_subparser.add_argument('--nodes', action='store_true')
        list_subparser.add_argument('--software-profiles',
                                    action='store_true')
        list_subparser.add_argument('--hardware-profiles',
                                    action='store_true')
        list_subparser.set_defaults(func=self.list_tag)
        return super().parseArgs(usage=usage)

    def runCommand(self):
        """Dispatch to the handler chosen via set_defaults(func=...)."""
        args = self.parseArgs()
        with DbManager().session() as session:
            args.func(session, args)

    def add_tag(self, session: Session, args):
        """Merge parsed tags into each matched node/profile; commit once."""
        if not args.nodespec and not args.software_profile and \
                not args.hardware_profile:
            sys.stderr.write('Error: must specify --nodes'
                             '/--software-profile/--hardware-profile\n')
            sys.stderr.flush()
            sys.exit(1)
        tags = parse_tags(args.tags)
        if args.nodespec:
            nodes = self._node_api.getNodesByNameFilter(session,
                                                        args.nodespec)
            for node in nodes:
                node_tags = node.getTags()
                node_tags.update(tags)
                self._node_api.set_tags(session, node_id=node.getId(),
                                        tags=node_tags)
                print(node.getName(), node.getTags())
        if args.software_profile:
            for name in args.software_profile.split(','):
                swp = self._swp_api.getSoftwareProfile(session, name)
                swp_tags = swp.getTags()
                swp_tags.update(tags)
                swp.setTags(swp_tags)
                self._swp_api.updateSoftwareProfile(session, swp)
        if args.hardware_profile:
            for name in args.hardware_profile.split(','):
                hwp = self._hwp_api.getHardwareProfile(session, name)
                hwp_tags = hwp.getTags()
                hwp_tags.update(tags)
                hwp.setTags(hwp_tags)
                self._hwp_api.updateHardwareProfile(session, hwp)
        session.commit()

    def remove_tag(self, session: Session, args):
        """Delete the given tag keys from each matched node/profile."""
        if not args.nodespec and not args.software_profile and \
                not args.hardware_profile:
            sys.stderr.write('Error: must specify --nodes'
                             '/--software-profile/--hardware-profile\n')
            sys.stderr.flush()
            sys.exit(1)
        # Flatten comma-separated --tags values into one key list.
        tag_keys = []
        for tag_string in args.tags:
            tag_keys.extend(tag_string.split(','))
        if args.nodespec:
            nodes = self._node_api.getNodesByNameFilter(session,
                                                        args.nodespec)
            for node in nodes:
                node_tags = node.getTags()
                for key in tag_keys:
                    if key in node_tags.keys():
                        node_tags.pop(key)
                self._node_api.set_tags(session, node_id=node.getId(),
                                        tags=node_tags)
                print(node.getName(), node.getTags())
        if args.software_profile:
            for name in args.software_profile.split(','):
                swp = self._swp_api.getSoftwareProfile(session, name)
                swp_tags = swp.getTags()
                for key in tag_keys:
                    if key in swp_tags.keys():
                        swp_tags.pop(key)
                swp.setTags(swp_tags)
                self._swp_api.updateSoftwareProfile(session, swp)
        if args.hardware_profile:
            for name in args.hardware_profile.split(','):
                hwp = self._hwp_api.getHardwareProfile(session, name)
                hwp_tags = hwp.getTags()
                for key in tag_keys:
                    if key in hwp_tags.keys():
                        hwp_tags.pop(key)
                hwp.setTags(hwp_tags)
                self._hwp_api.updateHardwareProfile(session, hwp)
        session.commit()

    def list_tag(self, session: Session, args):
        """Print a key/value -> resource-type -> name report of all tags."""
        report = TagReport()
        if args.all_resources or args.nodes:
            for node in self._node_api.getNodeList(session):
                for key, value in node.getTags().items():
                    report.add_node(key, value, node)
        if args.all_resources or args.software_profiles:
            for swp in self._swp_api.getSoftwareProfileList(session):
                for key, value in swp.getTags().items():
                    report.add_swp(key, value, swp)
        if args.all_resources or args.hardware_profiles:
            for hwp in self._hwp_api.getHardwareProfileList(session):
                for key, value in hwp.getTags().items():
                    report.add_hwp(key, value, hwp)
        for key, values in report.keys.items():
            for value, types in values.items():
                print('{} = {}:'.format(key, value))
                for type_, names in types.items():
                    print(' {}:'.format(type_))
                    for name in names:
                        print(' - {}'.format(name))
'hardwareProfile': hwProfile, 'softwareProfile': swProfile, 'nodeDetails': [], 'deviceName': None, 'rackNumber': 1, 'resourceUnits': 1, 'supportedUnits': 1, } gceAdapter._Gce__launchActive(session, nodeCreateDict) hwProfileName = 'gce' swProfileName = 'BasicCompute' hwProfile = HardwareProfileDbApi().getHardwareProfile(hwProfileName) swProfile = SoftwareProfileDbApi().getSoftwareProfile(swProfileName) gceAdapter = tortuga.resourceAdapter.gce.Gce() session = gceAdapter._Gce__initSession(swProfile=swProfile, hwProfile=hwProfile) # instance = gceAdapter._Gce__getInstance(session, instance_name='gce-02-local') # for intfc in instance [u'networkInterfaces']: # for access_cfg in intfc[u'accessConfigs']: # print access_cfg[u'natIP'] # gceAdapter._Gce__deleteInstance(session, instance_name='gce-01-local') # gceAdapter._Gce__deleteInstance(session, instance_name='gce-02-local')
class HardwareProfileManager(TortugaObjectManager, Singleton):
    """Manager-layer operations on hardware profiles.

    Thin orchestration over HardwareProfileDbApi plus related DB APIs;
    most methods delegate directly.
    """

    def __init__(self):
        super(HardwareProfileManager, self).__init__()
        self._hpDbApi = HardwareProfileDbApi()
        self._spDbApi = SoftwareProfileDbApi()
        self._networkDbApi = NetworkDbApi()
        self._globalParameterDbApi = GlobalParameterDbApi()
        self._nodeDbApi = NodeDbApi()

    def getHardwareProfileList(self, optionDict=None, tags=None):
        """
        Return all of the hardwareprofiles with referenced components
        in this hardwareprofile
        """
        return self._hpDbApi.getHardwareProfileList(optionDict=optionDict,
                                                    tags=tags)

    def setIdleSoftwareProfile(self, hardwareProfileName,
                               softwareProfileName=None):
        """Set idle software profile"""
        return self._hpDbApi.setIdleSoftwareProfile(hardwareProfileName,
                                                    softwareProfileName)

    def getHardwareProfile(self, name, optionDict=None):
        return self._hpDbApi.getHardwareProfile(name, optionDict or {})

    def getHardwareProfileById(self, id_, optionDict=None):
        return self._hpDbApi.getHardwareProfileById(id_, optionDict or {})

    def addAdmin(self, hardwareProfileName, adminUsername):
        """
        Add an admin as an authorized user.

            Returns:
                None
            Throws:
                TortugaException
                AdminNotFound
                HardwareProfileNotFound
        """
        return self._hpDbApi.addAdmin(hardwareProfileName, adminUsername)

    def deleteAdmin(self, hardwareProfileName, adminUsername):
        """
        Remove an admin as an authorized user.

            Returns:
                None
            Throws:
                TortugaException
                AdminNotFound
                HardwareProfileNotFound
        """
        return self._hpDbApi.deleteAdmin(hardwareProfileName, adminUsername)

    def updateHardwareProfile(self, hardwareProfileObject):
        """
        Update a hardware profile in the database that matches the
        passed in hardware profile object. The ID is used as the primary
        matching criteria.

            Returns:
                None
            Throws:
                TortugaException
                HardwareProfileNotFound
                InvalidArgument
        """
        self.getLogger().debug('Updating hardware profile [%s]' % (
            hardwareProfileObject.getName()))
        try:
            # First get the object from the db we are updating...
            existingProfile = self.\
                getHardwareProfileById(hardwareProfileObject.getId())
            if hardwareProfileObject.getInstallType() and \
                    hardwareProfileObject.getInstallType() != \
                    existingProfile.getInstallType():
                # BUG FIX: the original message had no '%s' placeholder,
                # so the '%' interpolation itself raised TypeError instead
                # of the intended InvalidArgument.
                raise InvalidArgument(
                    'Hardware profile [%s] installation type cannot be'
                    ' changed' % (hardwareProfileObject.getName()))
            self._hpDbApi.updateHardwareProfile(hardwareProfileObject)
        except TortugaException:
            raise
        except Exception as ex:
            self.getLogger().exception('%s' % ex)
            raise TortugaException(exception=ex)

    def createHardwareProfile(self, hwProfileSpec, settingsDict=None):
        """Create a hardware profile from a spec, filling in defaults.

        Raises:
            NetworkNotFound
            TortugaException
        """
        settingsDict = settingsDict or {}
        bUseDefaults = settingsDict['bUseDefaults'] \
            if 'bUseDefaults' in settingsDict else False
        osInfo = settingsDict['osInfo'] \
            if settingsDict and 'osInfo' in settingsDict else None
        validation.validateProfileName(hwProfileSpec.getName())
        if hwProfileSpec.getDescription() is None or \
                hwProfileSpec.getDescription() == '**DEFAULT**':
            hwProfileSpec.setDescription(
                '%s Nodes' % (hwProfileSpec.getName()))
        installerNode = self._nodeDbApi.getNode(
            ConfigManager().getInstaller(), {'softwareprofile': True})
        if bUseDefaults:
            if not hwProfileSpec.getNetworks():
                # No <network>...</network> entries found in the template,
                # use the default provisioning interface from the primary
                # installer.
                # Find first provisioning network and use it
                for nic in installerNode.getNics():
                    network = nic.getNetwork()
                    if network.getType() == 'provision':
                        # for now set the default interface to be index 0
                        # with the same device
                        networkDevice = fixNetworkDeviceName(
                            nic.getNetworkDevice().getName())
                        network.setNetworkDevice(
                            NetworkDevice(name=networkDevice))
                        hwProfileSpec.getNetworks().append(network)
                        break
                else:
                    raise NetworkNotFound(
                        'Unable to find provisioning network')
            else:
                # Ensure network device is defined
                installerNic = None
                for network in hwProfileSpec.getNetworks():
                    for installerNic in installerNode.getNics():
                        installerNetwork = installerNic.getNetwork()
                        if network.getId() and \
                                network.getId() == installerNetwork.getId():
                            break
                        elif network.getAddress() and \
                                network.getAddress() == \
                                installerNetwork.getAddress() and \
                                network.getNetmask() and \
                                network.getNetmask() == \
                                installerNetwork.getNetmask():
                            break
                    else:
                        # Unable to find network matching specification in
                        # template.
                        raise NetworkNotFound(
                            'Unable to find provisioning network [%s]' % (
                                network))
                    networkDevice = fixNetworkDeviceName(
                        installerNic.getNetworkDevice().getName())
                    network.setNetworkDevice(
                        NetworkDevice(name=networkDevice))
        if hwProfileSpec.getIdleSoftwareProfile():
            # <idleSoftwareProfileId>...</idleSoftwareProfileId> is always
            # contained within the output of get-hardwareprofile. If the
            # command-line option '--idleSoftwareProfile' is specified, it
            # overrides the
            # <idleSoftwareProfileId>...</idleSoftwareProfileId> element
            idleSoftwareProfile = self._spDbApi.getSoftwareProfile(
                hwProfileSpec.getIdleSoftwareProfile().getName())
            hwProfileSpec.setIdleSoftwareProfileId(
                idleSoftwareProfile.getId())
        if not osInfo:
            osInfo = installerNode.getSoftwareProfile().getOsInfo()
        osObjFactory = osUtility.getOsObjectFactory(osInfo.getName())
        if not hwProfileSpec.getKernel():
            hwProfileSpec.setKernel(
                osObjFactory.getOsSysManager().getKernel(osInfo))
        if not hwProfileSpec.getInitrd():
            hwProfileSpec.setInitrd(
                osObjFactory.getOsSysManager().getInitrd(osInfo))
        self._hpDbApi.addHardwareProfile(hwProfileSpec)
        # Iterate over all networks in the newly defined hardware profile
        # and build assocations to provisioning NICs
        if bUseDefaults:
            for network in \
                    [network for network in hwProfileSpec.getNetworks()
                     if network.getType() == 'provision']:
                # Get provisioning nic for network
                try:
                    provisioningNic = self.getProvisioningNicForNetwork(
                        network.getAddress(), network.getNetmask())
                except NicNotFound:
                    # There is currently no provisioning NIC defined for
                    # this network. This is not a fatal error.
                    continue
                self.setProvisioningNic(hwProfileSpec.getName(),
                                        provisioningNic.getId())

    def deleteHardwareProfile(self, name):
        """Delete hardwareprofile by name."""
        self._hpDbApi.deleteHardwareProfile(name)
        self.getLogger().info('Deleted hardware profile [%s]' % (name))

    def updateSoftwareOverrideAllowed(self, hardwareProfileName, flag):
        self._hpDbApi.updateSoftwareOverrideAllowed(hardwareProfileName,
                                                    flag)

    def getHypervisorNodes(self, hardwareProfileName):
        return self._hpDbApi.getHypervisorNodes(hardwareProfileName)

    def setProvisioningNic(self, hardwareProfileName, nicId):
        return self._hpDbApi.setProvisioningNic(hardwareProfileName, nicId)

    def getProvisioningNicForNetwork(self, network, netmask):
        return self._hpDbApi.getProvisioningNicForNetwork(network, netmask)

    def copyHardwareProfile(self, srcHardwareProfileName,
                            dstHardwareProfileName):
        validation.validateProfileName(dstHardwareProfileName)
        self.getLogger().info('Copying hardware profile [%s] to [%s]' % (
            srcHardwareProfileName, dstHardwareProfileName))
        return self._hpDbApi.copyHardwareProfile(srcHardwareProfileName,
                                                 dstHardwareProfileName)

    def getNodeList(self, hardwareProfileName):
        return self._hpDbApi.getNodeList(hardwareProfileName)
class NodeManager(TortugaObjectManager): \ # pylint: disable=too-many-public-methods def __init__(self): super(NodeManager, self).__init__() self._nodeDbApi = NodeDbApi() self._hardwareProfileDbApi = HardwareProfileDbApi() self._cm = ConfigManager() self._san = san.San() def __validateHostName(self, hostname: str, name_format: str) -> NoReturn: """ Raises: ConfigurationError """ bWildcardNameFormat = (name_format == '*') if hostname and not bWildcardNameFormat: # Host name specified, but hardware profile does not # allow setting the host name raise ConfigurationError( 'Hardware profile does not allow setting host names' ' of imported nodes') elif not hostname and bWildcardNameFormat: # Host name not specified but hardware profile expects it raise ConfigurationError( 'Hardware profile requires host names to be set') def createNewNode(self, session: Session, addNodeRequest: dict, dbHardwareProfile: HardwareProfiles, dbSoftwareProfile: Optional[SoftwareProfiles] = None, validateIp: bool = True, bGenerateIp: bool = True, dns_zone: Optional[str] = None) -> Nodes: """ Convert the addNodeRequest into a Nodes object Raises: NicNotFound """ self.getLogger().debug( 'createNewNode(): session=[%s], addNodeRequest=[%s],' ' dbHardwareProfile=[%s], dbSoftwareProfile=[%s],' ' validateIp=[%s], bGenerateIp=[%s]' % (id(session), addNodeRequest, dbHardwareProfile.name, dbSoftwareProfile.name if dbSoftwareProfile else '(none)', validateIp, bGenerateIp)) # This is where the Nodes() object is first created. node = Nodes() # Set the default node state node.state = 'Discovered' if 'rack' in addNodeRequest: node.rack = addNodeRequest['rack'] node.addHostSession = addNodeRequest['addHostSession'] hostname = addNodeRequest['name'] \ if 'name' in addNodeRequest else None # Ensure no conflicting options (ie. 
specifying host name for # hardware profile in which host names are generated) self.__validateHostName(hostname, dbHardwareProfile.nameFormat) node.name = hostname # Complete initialization of new node record nic_defs = addNodeRequest['nics'] \ if 'nics' in addNodeRequest else [] AddHostServerLocal().initializeNode(session, node, dbHardwareProfile, dbSoftwareProfile, nic_defs, bValidateIp=validateIp, bGenerateIp=bGenerateIp, dns_zone=dns_zone) # Set hardware profile of new node node.hardwareProfileId = dbHardwareProfile.id # Set software profile of new node; if the software profile is None, # attempt to set the software profile to the idle software profile # of the associated hardware profile. This may also be None, in # which case the software profile is undefined. node.softwareprofile = dbSoftwareProfile \ if dbSoftwareProfile else dbHardwareProfile.idlesoftwareprofile node.isIdle = dbSoftwareProfile.isIdle \ if dbSoftwareProfile else True # Return the new node return node def getNode(self, name, optionDict=None): """Get node by name""" optionDict_ = optionDict.copy() if optionDict else {} optionDict_.update({'hardwareprofile': True}) node = self._nodeDbApi.getNode(name, optionDict_) hwprofile = self._hardwareProfileDbApi.getHardwareProfile( node.getHardwareProfile().getName(), {'resourceadapter': True}) adapter_name = hwprofile.getResourceAdapter().getName() \ if hwprofile.getResourceAdapter() else 'default' # Query vcpus from resource adapter ResourceAdapterClass = resourceAdapterFactory.getResourceAdapterClass( adapter_name) # Update Node object node.setVcpus(ResourceAdapterClass().get_node_vcpus(node.getName())) return node def getNodeById(self, nodeId, optionDict=None): """ Get node by node id Raises: NodeNotFound """ return self._nodeDbApi.getNodeById(int(nodeId), optionDict) def getNodeByIp(self, ip): """ Get node by IP address Raises: NodeNotFound """ return self._nodeDbApi.getNodeByIp(ip) def getNodeList(self, tags=None): """Return all nodes""" return 
self._nodeDbApi.getNodeList(tags=tags) def updateNode(self, nodeName, updateNodeRequest): self.getLogger().debug('updateNode(): name=[{0}]'.format(nodeName)) session = DbManager().openSession() try: node = NodesDbHandler().getNode(session, nodeName) if 'nics' in updateNodeRequest: nic = updateNodeRequest['nics'][0] if 'ip' in nic: node.nics[0].ip = nic['ip'] node.nics[0].boot = True # Call resource adapter NodesDbHandler().updateNode(session, node, updateNodeRequest) run_post_install = False if 'state' in updateNodeRequest: run_post_install = node.state == 'Allocated' and \ updateNodeRequest['state'] == 'Provisioned' node.state = updateNodeRequest['state'] session.commit() if run_post_install: self.getLogger().debug( 'updateNode(): run-post-install for node [{0}]'.format( node.name)) self.__scheduleUpdate() except Exception: session.rollback() self.getLogger().exception( 'Exception updating node [{0}]'.format(nodeName)) finally: DbManager().closeSession() def updateNodeStatus(self, nodeName, state=None, bootFrom=None): """Update node status If neither 'state' nor 'bootFrom' are not None, this operation will update only the 'lastUpdated' timestamp. Returns: bool indicating whether state and/or bootFrom differed from current value """ value = 'None' if bootFrom is None else \ '1 (disk)' if int(bootFrom) == 1 else '0 (network)' self.getLogger().debug( 'updateNodeStatus(): node=[%s], state=[%s], bootFrom=[%s]' % (nodeName, state, value)) session = DbManager().openSession() try: node = NodesDbHandler().getNode(session, nodeName) result = self._updateNodeStatus(node, state=state, bootFrom=bootFrom) session.commit() return result finally: DbManager().closeSession() def _updateNodeStatus(self, dbNode, state=None, bootFrom=None): """ Internal method which takes a 'Nodes' object instead of a node name. 
""" result = NodesDbHandler().updateNodeStatus(dbNode, state, bootFrom) # Only change local boot configuration if the hardware profile is # not marked as 'remote' and we're not acting on the installer node. if dbNode.softwareprofile and \ dbNode.softwareprofile.type != 'installer' and \ dbNode.hardwareprofile.location not in \ ('remote', 'remote-vpn'): osUtility.getOsObjectFactory().getOsBootHostManager().\ writePXEFile(dbNode, localboot=bootFrom) return result def __process_nodeErrorDict(self, nodeErrorDict): result = {} nodes_deleted = [] for key, nodeList in nodeErrorDict.items(): result[key] = [dbNode.name for dbNode in nodeList] if key == 'NodesDeleted': for node in nodeList: node_deleted = { 'name': node.name, 'hardwareprofile': node.hardwareprofile.name, 'addHostSession': node.addHostSession, } if node.softwareprofile: node_deleted['softwareprofile'] = \ node.softwareprofile.name nodes_deleted.append(node_deleted) return result, nodes_deleted def deleteNode(self, nodespec): """ Delete node by nodespec Raises: NodeNotFound """ installer_hostname = socket.getfqdn().split('.', 1)[0] session = DbManager().openSession() try: nodes = [] for node in self.__expand_nodespec(session, nodespec): if node.name.split('.', 1)[0] == installer_hostname: self.getLogger().info( 'Ignoring request to delete installer node' ' ([{0}])'.format(node.name)) continue nodes.append(node) if not nodes: raise NodeNotFound('No nodes matching nodespec [%s]' % (nodespec)) self.__preDeleteHost(nodes) nodeErrorDict = NodesDbHandler().deleteNode(session, nodes) # REALLY!?!? Convert a list of Nodes objects into a list of # node names so we can report the list back to the end-user. # This needs to be FIXED! result, nodes_deleted = self.__process_nodeErrorDict(nodeErrorDict) session.commit() # ============================================================ # Perform actions *after* node deletion(s) have been committed # to database. 
# ============================================================ self.__postDeleteHost(nodes_deleted) addHostSessions = set( [tmpnode['addHostSession'] for tmpnode in nodes_deleted]) if addHostSessions: AddHostManager().delete_sessions(addHostSessions) bhm = osUtility.getOsObjectFactory().getOsBootHostManager() for nodeName in result['NodesDeleted']: # Remove the Puppet cert bhm.deletePuppetNodeCert(nodeName) bhm.nodeCleanup(nodeName) self.getLogger().info('Node [%s] deleted' % (nodeName)) # Schedule a cluster update self.__scheduleUpdate() return result except TortugaException: session.rollback() raise except Exception: session.rollback() self.getLogger().exception('Exception in NodeManager.deleteNode()') raise finally: DbManager().closeSession() def __process_delete_node_result(self, nodeErrorDict): # REALLY!?!? Convert a list of Nodes objects into a list of # node names so we can report the list back to the end-user. # This needs to be FIXED! result = {} nodes_deleted = [] for key, nodeList in nodeErrorDict.items(): result[key] = [dbNode.name for dbNode in nodeList] if key == 'NodesDeleted': for node in nodeList: node_deleted = { 'name': node.name, 'hardwareprofile': node.hardwareprofile.name, } if node.softwareprofile: node_deleted['softwareprofile'] = \ node.softwareprofile.name nodes_deleted.append(node_deleted) return result, nodes_deleted def __preDeleteHost(self, nodes): self.getLogger().debug('__preDeleteHost(): nodes=[%s]' % (' '.join([node.name for node in nodes]))) if not nodes: self.getLogger().debug('No nodes deleted in this operation') return kitmgr = KitActionsManager() for node in nodes: kitmgr.pre_delete_host( node.hardwareprofile.name, node.softwareprofile.name if node.softwareprofile else None, nodes=[node.name]) def __postDeleteHost(self, nodes_deleted): # 'nodes_deleted' is a list of dicts of the following format: # # { # 'name': 'compute-01', # 'softwareprofile': 'Compute', # 'hardwareprofile': 'LocalIron', # } # # if the node does not have an 
associated software profile, the # dict does not contain the key 'softwareprofile'. self.getLogger().debug('__postDeleteHost(): nodes_deleted=[%s]' % (nodes_deleted)) if not nodes_deleted: self.getLogger().debug('No nodes deleted in this operation') return kitmgr = KitActionsManager() for node_dict in nodes_deleted: kitmgr.post_delete_host(node_dict['hardwareprofile'], node_dict['softwareprofile'] if 'softwareprofile' in node_dict else None, nodes=[node_dict['name']]) def __scheduleUpdate(self): tortugaSubprocess.executeCommand( os.path.join(self._cm.getRoot(), 'bin/schedule-update')) def getInstallerNode(self, optionDict=None): return self._nodeDbApi.getNode(self._cm.getInstaller(), optionDict=optionDict) def getProvisioningInfo(self, nodeName): return self._nodeDbApi.getProvisioningInfo(nodeName) def getKickstartFile(self, node, hardwareprofile, softwareprofile): """ Generate kickstart file for specified node Raises: OsNotSupported """ osFamilyName = softwareprofile.os.family.name try: osSupportModule = __import__('tortuga.os.%s.osSupport' % (osFamilyName), fromlist=['OSSupport']) except ImportError: raise OsNotSupported('Operating system family [%s] not supported' % (osFamilyName)) OSSupport = osSupportModule.OSSupport tmpOsFamilyInfo = OsFamilyInfo(softwareprofile.os.family.name, softwareprofile.os.family.version, softwareprofile.os.family.arch) return OSSupport(tmpOsFamilyInfo).getKickstartFileContents( node, hardwareprofile, softwareprofile) def __transferNodeCommon(self, session, dbDstSoftwareProfile, results): \ # pylint: disable=no-self-use # Aggregate list of transferred nodes based on hardware profile # to call resource adapter minimal number of times. 
hwProfileMap = {} for transferResultDict in results: dbNode = transferResultDict['node'] dbHardwareProfile = dbNode.hardwareprofile if dbHardwareProfile not in hwProfileMap: hwProfileMap[dbHardwareProfile] = [transferResultDict] else: hwProfileMap[dbHardwareProfile].append(transferResultDict) session.commit() nodeTransferDict = {} # Kill two birds with one stone... do the resource adapter # action as well as populate the nodeTransferDict. This saves # having to iterate twice on the same result data. for dbHardwareProfile, nodesDict in hwProfileMap.items(): adapter = resourceAdapterFactory.getApi( dbHardwareProfile.resourceadapter.name) dbNodeTuples = [] for nodeDict in nodesDict: dbNode = nodeDict['node'] dbSrcSoftwareProfile = nodeDict['prev_softwareprofile'] if dbSrcSoftwareProfile.name not in nodeTransferDict: nodeTransferDict[dbSrcSoftwareProfile.name] = { 'added': [], 'removed': [dbNode], } else: nodeTransferDict[dbSrcSoftwareProfile.name]['removed'].\ append(dbNode) if dbDstSoftwareProfile.name not in nodeTransferDict: nodeTransferDict[dbDstSoftwareProfile.name] = { 'added': [dbNode], 'removed': [], } else: nodeTransferDict[dbDstSoftwareProfile.name]['added'].\ append(dbNode) # The destination software profile is available through # node relationship. 
dbNodeTuples.append((dbNode, dbSrcSoftwareProfile)) adapter.transferNode(dbNodeTuples, dbDstSoftwareProfile) session.commit() # Now call the 'refresh' action to all participatory components KitActionsManager().refresh(nodeTransferDict) return results def transferNode(self, nodespec, dstSoftwareProfileName, bForce=False): """ Transfer nodes defined by 'nodespec' to 'dstSoftwareProfile' Raises: NodeNotFound SoftwareProfileNotFound NodeTransferNotValid """ session = DbManager().openSession() try: nodes = self.__expand_nodespec(session, nodespec) if not nodes: raise NodeNotFound('No nodes matching nodespec [%s]' % (nodespec)) dbDstSoftwareProfile = SoftwareProfilesDbHandler().\ getSoftwareProfile(session, dstSoftwareProfileName) results = NodesDbHandler().transferNode(session, nodes, dbDstSoftwareProfile, bForce=bForce) return self.__transferNodeCommon(session, dbDstSoftwareProfile, results) finally: DbManager().closeSession() def transferNodes(self, srcSoftwareProfileName, dstSoftwareProfileName, count, bForce=False): """ Transfer 'count' nodes from 'srcSoftwareProfile' to 'dstSoftwareProfile' Raises: SoftwareProfileNotFound """ session = DbManager().openSession() try: # It is not necessary to specify a source software profile. If # not specified, pick any eligible nodes in the hardware profile # mapped to the destination software profile. Don't ask me who # uses this capability, but it's here if you need it... 
            # Source software profile is optional; None means "any
            # eligible node mapped to the destination profile"
            dbSrcSoftwareProfile = SoftwareProfilesDbHandler().\
                getSoftwareProfile(
                    session, srcSoftwareProfileName) \
                if srcSoftwareProfileName else None

            dbDstSoftwareProfile = SoftwareProfilesDbHandler().\
                getSoftwareProfile(session, dstSoftwareProfileName)

            # 'count' may arrive as a numeric string (possibly with a
            # fractional part); normalize it to int before use
            results = NodesDbHandler().transferNodes(
                session, dbSrcSoftwareProfile, dbDstSoftwareProfile,
                int(float(count)), bForce=bForce)

            return self.__transferNodeCommon(session, dbDstSoftwareProfile,
                                             results)
        finally:
            DbManager().closeSession()

    def idleNode(self, nodespec):
        """
        Idle all nodes matching 'nodespec'.

        Raises:
            NodeNotFound
        """
        session = DbManager().openSession()

        try:
            nodes = self.__expand_nodespec(session, nodespec)
            if not nodes:
                raise NodeNotFound(
                    'No nodes matching nodespec [%s]' % (nodespec))

            result = NodesDbHandler().idleNode(session, nodes)

            # Convert list of Nodes to list of node names for providing
            # user feedback.
            result_dict = {}
            for key, dbNodes in result.items():
                result_dict[key] = [dbNode.name for dbNode in dbNodes]

            session.commit()

            # Remove Puppet certificate(s) for idled node(s)
            for node_name in result_dict['success']:
                # Remove Puppet certificate for idled node
                bhm = osUtility.getOsObjectFactory().getOsBootHostManager()
                bhm.deletePuppetNodeCert(node_name)

            # Schedule a cluster update
            self.__scheduleUpdate()

            return result_dict
        except TortugaException as ex:
            # Known application errors propagate unchanged (no logging)
            session.rollback()
            raise
        except Exception as ex:
            # Unexpected errors are logged before re-raising
            session.rollback()
            self.getLogger().exception(
                '[%s] %s' % (self.__class__.__name__, ex))
            raise
        finally:
            DbManager().closeSession()

    def __process_activateNode_results(self, tmp_results, dstswprofilename):
        # Convert the raw activateNode() result dict into a
        # name-based dict suitable for returning to the caller
        results = {}

        for key, values in tmp_results.items():
            # With the exception of the "ProfileMappingNotAllowed" dict
            # item, all items in the dict are lists of nodes.
if key != 'ProfileMappingNotAllowed': results[key] = [dbNode.name for dbNode in values] else: results[key] = \ [(value[0].name, value[1], value[2]) for value in values] if tmp_results['success']: # Iterate over activated nodes, creating dict keyed on # 'addHostSession' addHostSessions = {} for node in tmp_results['success']: if node.addHostSession not in addHostSessions: addHostSessions[node.addHostSession] = [] addHostSessions[node.addHostSession] = \ node.hardwareprofile.name # For each 'addHostSession', call postAddHost() for addHostSession, hwprofile in addHostSessions.items(): AddHostManager().postAddHost(hwprofile, dstswprofilename, addHostSession) return results def activateNode(self, nodespec, softwareProfileName): """ Raises: SoftwareProfileNotFound NodeNotFound TortugaException """ session = DbManager().openSession() try: dbSoftwareProfile = SoftwareProfilesDbHandler().\ getSoftwareProfile(session, softwareProfileName) \ if softwareProfileName else None dbNodes = self.__expand_nodespec(session, nodespec) if not dbNodes: raise NodeNotFound('No nodes matching nodespec [%s]' % (nodespec)) tmp_results = NodesDbHandler().activateNode( session, dbNodes, dbSoftwareProfile) results = self.__process_activateNode_results( tmp_results, softwareProfileName) session.commit() # Schedule a cluster update self.__scheduleUpdate() return results except TortugaException as ex: session.rollback() raise except Exception as ex: session.rollback() self.getLogger().exception('%s' % ex) raise finally: DbManager().closeSession() def startupNode(self, nodespec, remainingNodeList=None, bootMethod='n'): """ Raises: NodeNotFound """ return self._nodeDbApi.startupNode(nodespec, remainingNodeList=remainingNodeList or [], bootMethod=bootMethod) def shutdownNode(self, nodespec, bSoftShutdown=False): """ Raises: NodeNotFound """ return self._nodeDbApi.shutdownNode(nodespec, bSoftShutdown) def build_node_filterspec(self, nodespec): filter_spec = [] for nodespec_token in nodespec.split(','): 
# Convert shell-style wildcards into SQL wildcards if '*' in nodespec_token or '?' in nodespec_token: filter_spec.append( nodespec_token.replace('*', '%').replace('?', '_')) continue if '.' not in nodespec_token: filter_spec.append(nodespec_token) filter_spec.append(nodespec_token + '.%') continue # Add nodespec "AS IS" filter_spec.append(nodespec_token) return filter_spec def __expand_nodespec(self, session, nodespec): \ # pylint: disable=no-self-use # Expand wildcards in nodespec. Each token in the nodespec can # be wildcard that expands into one or more nodes. return NodesDbHandler().getNodesByNameFilter( session, self.build_node_filterspec(nodespec)) def rebootNode(self, nodespec, bSoftReset=False, bReinstall=False): """ Raises: NodeNotFound """ session = DbManager().openSession() try: nodes = self.__expand_nodespec(session, nodespec) if not nodes: raise NodeNotFound('No nodes matching nodespec [%s]' % (nodespec)) bhm = osUtility.getOsObjectFactory().getOsBootHostManager() if bReinstall: for dbNode in nodes: bhm.setNodeForNetworkBoot(dbNode) results = NodesDbHandler().rebootNode(session, nodes, bSoftReset) session.commit() return results finally: DbManager().closeSession() def checkpointNode(self, nodeName): return self._nodeDbApi.checkpointNode(nodeName) def revertNodeToCheckpoint(self, nodeName): return self._nodeDbApi.revertNodeToCheckpoint(nodeName) def migrateNode(self, nodeName, remainingNodeList, liveMigrate): return self._nodeDbApi.migrateNode(nodeName, remainingNodeList, liveMigrate) def evacuateChildren(self, nodeName): self._nodeDbApi.evacuateChildren(nodeName) def getChildrenList(self, nodeName): return self._nodeDbApi.getChildrenList(nodeName) def setParentNode(self, nodeName, parentNodeName): self._nodeDbApi.setParentNode(nodeName, parentNodeName) def addStorageVolume(self, nodeName, volume, isDirect="DEFAULT"): """ Raises: VolumeDoesNotExist UnsupportedOperation """ node = self.getNode(nodeName, {'hardwareprofile': True}) # Only allow persistent 
        # volumes to be attached...
        vol = self._san.getVolume(volume)
        if vol is None:
            raise VolumeDoesNotExist(
                'Volume [%s] does not exist' % (volume))

        if not vol.getPersistent():
            raise UnsupportedOperation(
                'Only persistent volumes can be attached')

        api = resourceAdapterFactory.getApi(
            node.getHardwareProfile().getResourceAdapter().getName())

        # Let the resource adapter use its own default attachment mode
        # unless the caller overrides isDirect
        if isDirect == "DEFAULT":
            return api.addVolumeToNode(node, volume)

        return api.addVolumeToNode(node, volume, isDirect)

    def removeStorageVolume(self, nodeName, volume):
        """
        Detach storage volume 'volume' from node 'nodeName'.

        Raises:
            VolumeDoesNotExist
            UnsupportedOperation
        """
        node = self.getNode(nodeName, {'hardwareprofile': True})

        api = resourceAdapterFactory.getApi(
            node.getHardwareProfile().getResourceAdapter().getName())

        vol = self._san.getVolume(volume)

        if vol is None:
            raise VolumeDoesNotExist(
                'The volume [%s] does not exist' % (volume))

        if not vol.getPersistent():
            raise UnsupportedOperation(
                'Only persistent volumes can be detached')

        return api.removeVolumeFromNode(node, volume)

    def getStorageVolumes(self, nodeName):
        # Look up volumes via the SAN manager using the node's
        # canonical name
        return self._san.getNodeVolumes(self.getNode(nodeName).getName())

    def getNodesByNodeState(self, state):
        return self._nodeDbApi.getNodesByNodeState(state)

    def getNodesByNameFilter(self, _filter):
        return self._nodeDbApi.getNodesByNameFilter(_filter)
def test_getHardwareProfile_failed(dbm):
    """Looking up a nonexistent profile raises HardwareProfileNotFound."""
    api = HardwareProfileDbApi()

    with dbm.session() as session, \
            pytest.raises(HardwareProfileNotFound):
        api.getHardwareProfile(session, 'doesnotexistEXAMPLE')
# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from tortuga.db.hardwareProfileDbApi import HardwareProfileDbApi from tortuga.exceptions.hardwareProfileNotFound import HardwareProfileNotFound from tortuga.exceptions.invalidArgument import InvalidArgument from tortuga.objects.hardwareProfile import HardwareProfile hardwareProfileDbApi = HardwareProfileDbApi() def test_getHardwareProfile(dbm): """ Get hardware profile with default resource adapter configuration defined. """ name = 'aws' with dbm.session() as session: result = HardwareProfileDbApi().getHardwareProfile(session, name) assert isinstance(result, HardwareProfile)
class HardwareProfileManager(TortugaObjectManager):
    """
    Facade over the hardware profile database API, adding validation,
    defaulting, and event firing.
    """

    def __init__(self):
        super(HardwareProfileManager, self).__init__()

        self._hpDbApi = HardwareProfileDbApi()
        self._spDbApi = SoftwareProfileDbApi()
        self._networkDbApi = NetworkDbApi()
        self._globalParameterDbApi = GlobalParameterDbApi()
        self._nodeDbApi = NodeDbApi()
        self._logger = logging.getLogger(HARDWARE_PROFILE_NAMESPACE)

    def getHardwareProfileList(self, session: Session,
                               optionDict: Optional[Union[Dict[str, str],
                                                          None]] = None,
                               tags: Optional[Tags] = None):
        """
        Return all of the hardwareprofiles with referenced components
        in this hardwareprofile
        """
        return self._hpDbApi.getHardwareProfileList(session,
                                                    optionDict=optionDict,
                                                    tags=tags)

    def getHardwareProfile(self, session: Session, name: str,
                           optionDict: Optional[Union[dict, None]] = None):
        """Return the hardware profile matching 'name'."""
        return self._hpDbApi.getHardwareProfile(session, name, optionDict)

    def getHardwareProfileById(self, session: Session, id_, optionDict=None):
        """Return the hardware profile matching primary key 'id_'."""
        return self._hpDbApi.getHardwareProfileById(session, id_, optionDict)

    def addAdmin(self, session: Session, hardwareProfileName: str,
                 adminUsername: str):
        """
        Add an admin as an authorized user.

        Returns:
            None
        Throws:
            TortugaException
            AdminNotFound
            HardwareProfileNotFound
        """
        return self._hpDbApi.addAdmin(session, hardwareProfileName,
                                      adminUsername)

    def deleteAdmin(self, session: Session, hardwareProfileName: str,
                    adminUsername: str):
        """
        Remove an admin as an authorized user.

        Returns:
            None
        Throws:
            TortugaException
            AdminNotFound
            HardwareProfileNotFound
        """
        return self._hpDbApi.deleteAdmin(session, hardwareProfileName,
                                         adminUsername)

    def updateHardwareProfile(self, session: Session,
                              hardwareProfileObject: Any):
        """
        Update a hardware profile in the database that matches the
        passed in hardware profile object. The ID is used as the
        primary matching criteria.

        Returns:
            None
        Throws:
            TortugaException
            HardwareProfileNotFound
            InvalidArgument
        """
        self._logger.debug('Updating hardware profile [%s]' % (
            hardwareProfileObject.getName()))

        existing_hwp = self.getHardwareProfileById(
            session, hardwareProfileObject.getId())

        # Changing the installation type of an existing profile is
        # not supported
        if hardwareProfileObject.getInstallType() and \
                hardwareProfileObject.getInstallType() != \
                existing_hwp.getInstallType():
            # BUGFIX: the original message had no '%s' placeholder, so
            # the '%' operator raised TypeError instead of
            # InvalidArgument
            raise InvalidArgument(
                'Hardware profile [%s] installation type cannot be'
                ' changed' % (hardwareProfileObject.getName()))

        self._hpDbApi.updateHardwareProfile(session, hardwareProfileObject)

        #
        # Get the new version from the DB
        #
        new_hwp = self.getHardwareProfileById(session,
                                              hardwareProfileObject.getId())

        #
        # If the tags have changed, fire the tags changed event
        #
        if existing_hwp.getTags() != new_hwp.getTags():
            HardwareProfileTagsChanged.fire(
                hardwareprofile_id=str(new_hwp.getId()),
                hardwareprofile_name=new_hwp.getName(),
                tags=new_hwp.getTags(),
                previous_tags=existing_hwp.getTags())

    def createHardwareProfile(self, session: Session,
                              hwProfileSpec: HardwareProfile,
                              settingsDict: Optional[Union[dict, None]]
                              = None):
        """
        Create a new hardware profile from 'hwProfileSpec', filling in
        defaults (networks, kernel, initrd) from the installer node
        when requested via settingsDict['defaults'].

        Raises:
            NetworkNotFound
        """
        bUseDefaults = settingsDict['defaults'] \
            if settingsDict and 'defaults' in settingsDict else False

        # (cleanup: removed duplicated "settingsDict and" operand)
        osInfo = settingsDict['osInfo'] \
            if settingsDict and 'osInfo' in settingsDict else None

        validation.validateProfileName(hwProfileSpec.getName())

        if hwProfileSpec.getDescription() is None:
            hwProfileSpec.setDescription(
                '%s Nodes' % (hwProfileSpec.getName()))

        installerNode = self._nodeDbApi.getNode(
            session, ConfigManager().getInstaller(),
            {'softwareprofile': True})

        if bUseDefaults:
            if not hwProfileSpec.getNetworks():
                # No <network>...</network> entries found in the template,
                # use the default provisioning interface from the primary
                # installer.

                # Find first provisioning network and use it
                for nic in installerNode.getNics():
                    network = nic.getNetwork()
                    if network.getType() == 'provision':
                        # for now set the default interface to be index 0
                        # with the same device
                        networkDevice = fixNetworkDeviceName(
                            nic.getNetworkDevice().getName())

                        network.setNetworkDevice(
                            NetworkDevice(name=networkDevice))

                        hwProfileSpec.getNetworks().append(network)

                        break
                else:
                    raise NetworkNotFound(
                        'Unable to find provisioning network')
            else:
                # Ensure network device is defined
                installerNic = None

                for network in hwProfileSpec.getNetworks():
                    for installerNic in installerNode.getNics():
                        installerNetwork = installerNic.getNetwork()

                        if network.getId() and \
                                network.getId() == installerNetwork.getId():
                            break
                        elif network.getAddress() and \
                                network.getAddress() == \
                                installerNetwork.getAddress() and \
                                network.getNetmask() and \
                                network.getNetmask() == \
                                installerNetwork.getNetmask():
                            break
                    else:
                        # Unable to find network matching specification in
                        # template.
                        raise NetworkNotFound(
                            'Unable to find provisioning network [%s]' % (
                                network))

                    networkDevice = fixNetworkDeviceName(
                        installerNic.getNetworkDevice().getName())

                    network.setNetworkDevice(
                        NetworkDevice(name=networkDevice))

        if not osInfo:
            # Default to the installer's operating system
            osInfo = installerNode.getSoftwareProfile().getOsInfo()

        osObjFactory = osUtility.getOsObjectFactory(osInfo.getName())

        if not hwProfileSpec.getKernel():
            hwProfileSpec.setKernel(
                osObjFactory.getOsSysManager().getKernel(osInfo))

        if not hwProfileSpec.getInitrd():
            hwProfileSpec.setInitrd(
                osObjFactory.getOsSysManager().getInitrd(osInfo))

        self._hpDbApi.addHardwareProfile(session, hwProfileSpec)

        # Iterate over all networks in the newly defined hardware profile
        # and build assocations to provisioning NICs
        if bUseDefaults:
            for network in \
                    [network for network in hwProfileSpec.getNetworks()
                     if network.getType() == 'provision']:
                # Get provisioning nic for network
                try:
                    provisioningNic = self.getProvisioningNicForNetwork(
                        session, network.getAddress(), network.getNetmask())
                except NicNotFound:
                    # There is currently no provisioning NIC defined for
                    # this network. This is not a fatal error.
                    continue

                self.setProvisioningNic(
                    session, hwProfileSpec.getName(), provisioningNic.getId())

        #
        # Fire the tags changed event for all creates that have tags
        #
        # Get the latest version from the db in case the create method
        # added some embellishments
        #
        hwp = self.getHardwareProfile(session, hwProfileSpec.getName())
        if hwp.getTags():
            HardwareProfileTagsChanged.fire(
                hardwareprofile_id=str(hwp.getId()),
                hardwareprofile_name=hwp.getName(),
                tags=hwp.getTags(),
                previous_tags={})

    def deleteHardwareProfile(self, session: Session, name: str) -> None:
        """
        Delete hardwareprofile by name.
        """
        self._hpDbApi.deleteHardwareProfile(session, name)

        self._logger.info('Deleted hardware profile [%s]' % (name))

    def updateSoftwareOverrideAllowed(self, session: Session,
                                      hardwareProfileName: str,
                                      flag: bool) -> None:
        """Set the software-override-allowed flag on a hardware profile."""
        self._hpDbApi.updateSoftwareOverrideAllowed(
            session, hardwareProfileName, flag)

    def setProvisioningNic(self, session: Session, hardwareProfileName: str,
                           nicId: int):
        """Associate the NIC 'nicId' as the provisioning NIC."""
        return self._hpDbApi.setProvisioningNic(
            session, hardwareProfileName, nicId)

    def getProvisioningNicForNetwork(self, session: Session, network: str,
                                     netmask: str):
        """Return the provisioning NIC for the given network/netmask."""
        return self._hpDbApi.getProvisioningNicForNetwork(
            session, network, netmask)

    def copyHardwareProfile(self, session: Session,
                            srcHardwareProfileName: str,
                            dstHardwareProfileName: str):
        """Copy an existing hardware profile under a new name."""
        validation.validateProfileName(dstHardwareProfileName)

        self._logger.info(
            'Copying hardware profile [%s] to [%s]' % (
                srcHardwareProfileName, dstHardwareProfileName))

        self._hpDbApi.copyHardwareProfile(session, srcHardwareProfileName,
                                          dstHardwareProfileName)

        #
        # Fire the tags changed event for all copies that have tags
        #
        hwp = self.getHardwareProfile(session, dstHardwareProfileName)
        if hwp.getTags():
            HardwareProfileTagsChanged.fire(
                hardwareprofile_id=str(hwp.getId()),
                hardwareprofile_name=hwp.getName(),
                tags=hwp.getTags(),
                previous_tags={})

    def getNodeList(self, session: Session, hardwareProfileName: str):
        """Return the nodes provisioned in the named hardware profile."""
        return self._hpDbApi.getNodeList(session, hardwareProfileName)