def updateSoftwareProfile(self, softwareProfileObject):
    """
    Update a software profile
    """
    session = DbManager().openSession()

    try:
        dbSoftwareProfile = self._softwareProfilesDbHandler.\
            getSoftwareProfileById(
                session, softwareProfileObject.getId())

        self.__populateSoftwareProfile(
            session, softwareProfileObject, dbSoftwareProfile)

        session.commit()
    except TortugaException:
        session.rollback()
        raise
    except Exception as ex:
        session.rollback()
        self.getLogger().exception('%s' % ex)
        raise
    finally:
        DbManager().closeSession()
def process_delete_host_request(request):
    session = DbManager().openSession()

    try:
        req = NodeRequestsDbHandler().get_by_addHostSession(
            session, request['transaction_id'])
        if req is None:
            # Session was deleted before being processed; nothing to do...
            return

        ahm.update_session(request['transaction_id'], running=True)

        logger.debug(
            'process_delete_host_request(): transaction_id=[{0}],'
            ' nodespec=[{1}]'.format(
                request['transaction_id'], request['nodespec']))

        try:
            NodeApi().deleteNode(request['nodespec'])

            ahm.delete_session(request['transaction_id'])

            session.delete(req)
        except NodeNotFound:
            ahm.delete_session(request['transaction_id'])

            session.delete(req)
        except TortugaException as exc:
            logger.exception('Exception while deleting nodes')

            req.message = str(exc)
            req.state = 'error'
            req.last_update = datetime.datetime.utcnow()
        finally:
            ahm.update_session(request['transaction_id'], running=False)
    finally:
        session.commit()

        DbManager().closeSession()
def evacuateChildren(self, nodeName):
    """
    Evacuate children of node
    """
    session = DbManager().openSession()

    try:
        dbNode = self._nodesDbHandler.getNode(session, nodeName)

        self._nodesDbHandler.evacuateChildren(session, dbNode)

        session.commit()
    except TortugaException:
        session.rollback()
        raise
    except Exception as ex:
        session.rollback()
        self.getLogger().exception('%s' % ex)
        raise
    finally:
        DbManager().closeSession()
def main():
    spot_instance_cache = configparser.ConfigParser()
    spot_instance_cache.read('/opt/tortuga/var/spot-instances.conf')

    spot_instances = []

    for item in spot_instance_cache.sections():
        try:
            spot_instances.append(spot_instance_cache.get(item, 'node'))
        except configparser.NoOptionError:
            pass

    with DbManager().session() as session:
        for node in NodesDbHandler().getNodeList(session):
            if node.hardwareprofile.resourceadapter and \
                    node.hardwareprofile.resourceadapter.name == 'AWS':
                if node.name in spot_instances:
                    print(node.name)
def action_configure(self, _, *args, **kwargs):
    """
    Configure.

    :param _:        Unused
    :param *args:    Unused
    :param **kwargs: Unused

    :returns: None
    """
    with DbManager().session() as session:
        installer_node = NodesDbHandler().getNode(
            session, self.kit_installer.config_manager.getInstaller())

        for provisioning_nic in installer_node.hardwareprofile.nics:
            self._provisioning_nics(provisioning_nic)

        self._node_nics(session)
def addKit(self, kit):
    """
    Insert kit into the db.

    Raises:
        KitAlreadyExists
        DbError
    """
    session = DbManager().openSession()

    try:
        dbKit = self._kitsDbHandler.addKit(session, kit)

        session.commit()

        iteration = dbKit.components[0].os_components[0].os.arch \
            if dbKit.isOs else dbKit.iteration

        kit_descr = format_kit_descriptor(
            dbKit.name, dbKit.version, iteration)

        logmsg = 'Installed OS kit [{0}] successfully' \
            if dbKit.isOs else 'Installed kit [{0}] successfully'

        self.getLogger().info(logmsg.format(kit_descr))
    except TortugaException:
        session.rollback()
        raise
    except Exception as exc:
        session.rollback()
        self.getLogger().exception('%s' % (exc))
        raise
    finally:
        DbManager().closeSession()
def setIp(self, nicId, ip):
    # Note: the flattened original wrapped this in an outer 'try' with no
    # handler, which is not valid Python; a single try/except/finally
    # preserves the behavior.
    session = DbManager().openSession()

    try:
        dbNic = self._nicsDbHandler.getNicById(session, nicId)

        self.getLogger().debug('setIp: nicId [%s] ip [%s]' % (nicId, ip))

        dbNic.ip = ip

        session.commit()
    except TortugaException:
        session.rollback()
        raise
    except Exception as ex:
        session.rollback()
        self.getLogger().exception('%s' % ex)
        raise
    finally:
        DbManager().closeSession()
def getResourceAdapter(self, name):
    resourceAdapterObj = None

    session = DbManager().openSession()

    try:
        dbResourceAdapter = self._resourceAdaptersDbHandler.\
            getResourceAdapter(session, name)

        self.loadRelations(dbResourceAdapter, {'kit': True})

        resourceAdapterObj = ResourceAdapter.getFromDbDict(
            dbResourceAdapter.__dict__)
    except TortugaException:
        raise
    except Exception as ex:
        self.getLogger().exception(str(ex))
    finally:
        # the original never closed the session; close it here to avoid
        # leaking the connection
        DbManager().closeSession()

    return resourceAdapterObj
def runCommand(self):
    self.parseArgs(
        _("""
Display list of nodes able to use the specified software profile,
ordered by cost.
"""))

    softwareProfileName = self.getArgs().softwareProfile

    nodeApi = NodeApi()
    softwareUsesHardwareDbApi = SoftwareUsesHardwareDbApi()
    hardwareProfileDbApi = HardwareProfileDbApi()

    load_kits()

    with DbManager().session() as session:
        hwPList = hardwareProfileDbApi.getHardwareProfileList(session)

        hardwareProfileIdList = softwareUsesHardwareDbApi.\
            getAllowedHardwareProfilesBySoftwareProfileName(
                session, softwareProfileName)

        nodeList = nodeApi.getNodeList(session)

        usableNodes = []

        for node in nodeList:
            if (node.getHardwareProfile().getId() in
                    hardwareProfileIdList) and node.getIsIdle():
                usableNodes.append(node)

        costNameList = []

        for node in usableNodes:
            nodeHwP = node.getHardwareProfile().getId()

            for hwP in hwPList:
                if hwP.getId() == nodeHwP:
                    costNameList.append(
                        [int(hwP.getCost()), node.getName()])

                    break

        costNameList.sort()

        for node in costNameList:
            print('%s' % (node[1]))
def runCommand(self):
    self.parseArgs()

    load_kits()

    with DbManager().session() as session:
        kitmgr = KitActionsManager()
        kitmgr.session = session

        component = kitmgr.load_component(self.getArgs().cname)

        nodegroup = 'installer'

        if '_configure' not in dir(component):
            print(_('This component does not have configuration'),
                  file=sys.stderr)

            sys.exit(0)

        component._configure(nodegroup, sys.stdout)
def main():
    gce = Gce()

    session = DbManager().openSession()

    # Find first hardware profile that has 'gce' as the resource adapter
    for hwprofile in \
            HardwareProfilesDbHandler().getHardwareProfileList(session):
        if hwprofile.resourceadapter and \
                hwprofile.resourceadapter.name == 'gce':
            break
    else:
        raise Exception(
            'No hardware profile found with Google Compute Engine'
            ' resource adapter enabled')

    print('Using hardware profile \'%s\''
          ' (use --hardware-profile to override)' % (hwprofile.name))

    # Find first software profile mapped to hardware profile
    swprofile = hwprofile.mappedsoftwareprofiles[0]

    print('Using software profile \'%s\''
          ' (use --software-profile to override)' % (swprofile.name))

    gce_session = gce._Gce__initSession(hwprofile)

    # gce._Gce__launchInstance(gce_session, instance_name)

    addNodesRequest = {
        'count': 1,
        # 'nodeDetails': [
        #     {
        #         'name': 'compute-05.private',
        #     },
        # ],
    }

    gce._Gce__addActiveNodes(
        gce_session, session, addNodesRequest, hwprofile, swprofile)
def runCommand(self) -> None:
    self.parseArgs()

    with DbManager().session() as session:
        try:
            # .one() raises NoResultFound when no row matches; the
            # original used .first(), which returns None and would never
            # trigger the handler below
            installer_swprofile = session.query(SoftwareProfile).filter(
                SoftwareProfile.type == 'installer').one()
        except NoResultFound:
            raise OperationFailed(
                'Malformed installation: no installer software profile'
                ' found')

        installer_node = installer_swprofile.nodes[0]

        host_name_arg = self.getArgs().hostname[0]

        if self.getArgs().public:
            installer_node.public_hostname = host_name_arg
        else:
            installer_node.name = host_name_arg

        session.commit()
def process_addhost_request(addHostSession):
    logger = logging.getLogger('tortuga.addhost')
    logger.addHandler(logging.NullHandler())

    with DbManager().session() as session:
        req = NodeRequestsDbHandler().get_by_addHostSession(
            session, addHostSession)
        if req is None:
            # session was deleted prior to being processed; nothing to
            # do...
            return

        addHostRequest = json.loads(req.request)

        with AddHostSessionContextManager(req.addHostSession) as ahm:
            try:
                logger.debug(
                    'process_addhost_request(): Processing add host'
                    ' request [%s]' % (req.addHostSession))

                ahm.addHosts(session, req.addHostSession, addHostRequest)

                # Delete session log
                ahm.delete_session(req.addHostSession)

                # Completed node requests are deleted immediately
                session.delete(req)

                logger.debug(
                    'process_addhost_request() Add host request [%s]'
                    ' processed successfully' % (req.addHostSession))
            except Exception as exc:
                logger.exception(
                    'A fatal error occurred during add host workflow')

                req.state = 'error'
                req.message = str(exc)
                req.last_update = datetime.datetime.utcnow()
            finally:
                session.commit()
def updateNode(self, nodeName, updateNodeRequest):
    self.getLogger().debug('updateNode(): name=[{0}]'.format(nodeName))

    session = DbManager().openSession()

    try:
        node = NodesDbHandler().getNode(session, nodeName)

        if 'nics' in updateNodeRequest:
            nic = updateNodeRequest['nics'][0]

            if 'ip' in nic:
                node.nics[0].ip = nic['ip']
                node.nics[0].boot = True

        # Call resource adapter
        NodesDbHandler().updateNode(session, node, updateNodeRequest)

        run_post_install = False

        if 'state' in updateNodeRequest:
            run_post_install = node.state == 'Allocated' and \
                updateNodeRequest['state'] == 'Provisioned'

            node.state = updateNodeRequest['state']

        session.commit()

        if run_post_install:
            self.getLogger().debug(
                'updateNode(): run-post-install for node [{0}]'.format(
                    node.name))

            self.__scheduleUpdate()
    except Exception:
        session.rollback()

        self.getLogger().exception(
            'Exception updating node [{0}]'.format(nodeName))
    finally:
        DbManager().closeSession()
def _update_resource_adapter_configuration(adapter_cfg, profile_name):
    normalized_cfg = []
    for key, value in adapter_cfg.items():
        normalized_cfg.append({
            'key': key,
            'value': value,
        })

    ra_api = api.ResourceAdapterConfigurationApi()

    # check for existing resource adapter configuration
    with DbManager().session() as session:
        try:
            ra_api.get(session, 'AWS', profile_name)

            print_statement(
                'Updating AWS resource adapter configuration'
                ' profile [{0}]', profile_name)

            # remove potentially conflicting configuration items
            if 'user_data_script_template' in adapter_cfg:
                normalized_cfg.append({
                    'key': 'cloud_init_script_template',
                    'value': None,
                })
            elif 'cloud_init_script_template' in adapter_cfg:
                normalized_cfg.append({
                    'key': 'user_data_script_template',
                    'value': None,
                })

            ra_api.update(session, 'AWS', profile_name, normalized_cfg)
        except ResourceNotFound:
            print_statement(
                'Creating AWS resource adapter configuration'
                ' profile [{0}]', profile_name)

            ra_api.create(session, 'AWS', profile_name, normalized_cfg)
def _write_config_to_db(self, adapter_cfg: Dict[str, str],
                        profile_name: str):
    normalized_cfg = []
    for key, value in adapter_cfg.items():
        normalized_cfg.append({
            'key': key,
            'value': value,
        })

    api = ResourceAdapterConfigurationApi()

    with DbManager().session() as session:
        try:
            api.get(session, self.adapter_type, profile_name)

            print('Updating resource adapter configuration '
                  'profile: {}'.format(profile_name))

            api.update(session, self.adapter_type, profile_name,
                       normalized_cfg)
        except ResourceNotFound:
            print('Creating resource adapter configuration '
                  'profile {}'.format(profile_name))

            api.create(session, self.adapter_type, profile_name,
                       normalized_cfg)
def runCommand(self):
    self.parseArgs(
        _("""
Installs operating system media to Tortuga for the purpose of
package-based node provisioning.
"""))

    load_kits()

    api = getKitApi(self.getUsername(), self.getPassword())

    # Pre-process the media URL list
    os_media_urls: List[str] = self.getArgs().osMediaUrl.split(',')

    with DbManager().session() as session:
        api.installOsKit(session,
                         os_media_urls,
                         bUseSymlinks=self.getArgs().symlinksFlag,
                         bInteractive=True,
                         mirror=self.getArgs().mirror)

    if self.getArgs().sync:
        Puppet().agent()
def runCommand(self):
    self.parseArgs(
        _("""
Show the software to hardware profile mappings for the specified
software profile.
"""))

    load_kits()

    softwareUsesHardwareDbApi = SoftwareUsesHardwareDbApi()

    with DbManager().session() as session:
        api = HardwareProfileApi()

        hardwareProfileIdList = softwareUsesHardwareDbApi.\
            getAllowedHardwareProfilesBySoftwareProfileName(
                session, self.getArgs().swprofile)

        print('Software Profile [%s] is allowed to use the following'
              ' hardware profiles:' % (self.getArgs().swprofile))

        for hardwareProfileId in hardwareProfileIdList:
            for hp in api.getHardwareProfileList(session):
                if hp.getId() == hardwareProfileId:
                    print(hp.getName())
TortugaCeleryApp.app = Application()
TortugaCeleryApp.dbm = DbManager()

#
# This environment variable is set by the test runner (in our case tox,
# through tox.ini). When it is set, it allows us to use a test version of
# the Celery app that does not depend on an external broker.
#
if 'TORTUGA_TEST' in os.environ:
    app = TestApp(include=[
        'tortuga.events.tasks',
        'tortuga.resourceAdapter.tasks',
    ])
    app.app = Application()
    app.dbm = DbManager()

#
# In regular mode, we also want to load the kits, and include any tasks
# they may have as well.
#
else:
    load_kits()

    kits_task_modules: List[str] = []
    for kit_installer_class in get_all_kit_installers():
        kit_installer = kit_installer_class()
        kit_installer.register_event_listeners()
        kits_task_modules += kit_installer.task_modules

    config_manager = ConfigManager()
    redis_password = config_manager.getRedisPassword()
def dbm():
    dbmgr = DbManager(create_engine('sqlite:///:memory:', echo=False))

    dbmgr.init_database()

    rhel7_os_family_info = osFamilyInfo.OsFamilyInfo('rhel', '7', 'x86_64')

    os_info = osInfo.OsInfo('centos', '7.4', 'x86_64')
    os_info.setOsFamilyInfo(rhel7_os_family_info)

    settings = {
        'language': 'en',
        'keyboard': 'en_US',
        'timezone': 'UTC',
        'utc': 'true',
        'intWebPort': '8008',
        'intWebServicePort': '8444',
        'adminPort': '8443',
        'eulaAccepted': 'true',
        'depotpath': '/opt/tortuga/depot',
        'osInfo': os_info,
        'fqdn': getfqdn(),
        'installer_software_profile': 'Installer',
        'installer_hardware_profile': 'Installer',
    }

    with dbmgr.session() as session:
        primeDb(session, settings)

        init_global_parameters(session, settings)

        # create sample tags
        all_tags = []
        for idx in range(1, 5 + 1):
            tag = dict(
                name='tag{:d}'.format(idx),
                value='value{:d}'.format(idx),
            )
            all_tags.append(tag)

        installer_node = session.query(Node).filter(
            Node.name == settings['fqdn']).one()

        os_ = session.query(OperatingSystem).filter(
            OperatingSystem.name == 'centos').one()

        rhel7_os_family = session.query(OperatingSystemFamily).filter(
            OperatingSystemFamily.name == 'rhel').one()

        # add add'l operating system/family
        rhel75_os = OperatingSystem(name='rhel', version='7.5',
                                    arch='x86_64')
        rhel75_os.family = rhel7_os_family

        session.add(rhel75_os)

        admin = Admin(username='******',
                      password=pbkdf2_sha256.hash('password'),
                      realname='realname',
                      description='description')
        session.add(admin)

        eth0_network_device = NetworkDevice(name='eth0')
        eth1_network_device = NetworkDevice(name='eth1')

        # Add dummy provisioning network
        network = Network(address='10.2.0.0',
                          netmask='255.255.255.0',
                          name='Provisioning network on eth1',
                          type='provision')

        installer_node.hardwareprofile.hardwareprofilenetworks.append(
            HardwareProfileNetwork(
                network=network,
                networkdevice=eth1_network_device,
            ))

        # create nic on installer
        installer_nic = Nic(
            ip='10.2.0.1',
            network=network,
            networkdevice=eth1_network_device,
        )

        installer_node.nics = [installer_nic]

        # create 'base' kit
        kit = Kit()
        kit.name = 'base'
        kit.version = '7.1.0'
        kit.iteration = '0'
        kit.description = 'Sample base kit'

        installer_component = Component(name='installer', version='7.0')
        installer_component.family = [rhel7_os_family]
        installer_component.kit = kit

        core_component = Component(name='core', version='7.0',
                                   description='Compute component')
        core_component.family = [rhel7_os_family]
        core_component.kit = kit

        # add component not enabled by default
        pdsh_component = Component(name='pdsh', version='7.0',
                                   description='pdsh component')
        pdsh_component.family = [rhel7_os_family]
        pdsh_component.kit = kit

        # add fake dhcp component
        dhcpd_component = Component(name='dhcpd', version='7.0',
                                    description='Mock dhcpd component')
        dhcpd_component.family = [rhel7_os_family]
        dhcpd_component.kit = kit

        session.add(kit)

        # create OS kit
        os_kit = Kit(name='centos', version='7.4', iteration='0')
        os_kit.isOs = True
        os_component = Component(name='centos-7.4-x86_64', version='7.4')
        os_component.os = [os_]
        os_component.kit = os_kit
        os_kit.components.append(os_component)

        session.add(os_kit)

        # create resource adapter kit
        ra_kit = Kit(name='awsadapter', version='0.0.1', iteration='0')
        ra_component = Component(name='management', version='0.0.1')
        ra_component.family.append(rhel7_os_family)
        ra_kit.components.append(ra_component)

        installer_node.softwareprofile.components.append(ra_component)
        installer_node.softwareprofile.components.append(
            installer_component)
        installer_node.softwareprofile.components.append(dhcpd_component)

        # create 'default' resource adapter
        default_adapter = ResourceAdapter(name='default', kit=kit)

        # create resource adapter
        aws_adapter = ResourceAdapter(name='aws', kit=ra_kit)

        aws_adapter_cfg = ResourceAdapterConfig(
            name='default',
            description='Example default resource adapter configuration')

        aws_adapter_cfg.configuration.append(
            ResourceAdapterSetting(key='ami', value='ami-XXXXXX'))

        aws_adapter_cfg.configuration.append(
            ResourceAdapterSetting(key='use_instance_hostname',
                                   value='true'))

        aws_adapter.resource_adapter_config.append(aws_adapter_cfg)

        # add second resource adapter configuration
        aws_adapter_cfg2 = ResourceAdapterConfig(name='nondefault',
                                                 admin=admin)
        aws_adapter_cfg2.configuration.append(
            ResourceAdapterSetting(key='another_key',
                                   value='another_value'))

        aws_adapter.resource_adapter_config.append(aws_adapter_cfg2)

        session.add(aws_adapter)

        # create 'aws' hardware profile
        # does *not* have a default resource adapter config
        aws_hwprofile = HardwareProfile(name='aws')
        aws_hwprofile.location = 'remote'
        aws_hwprofile.resourceadapter = aws_adapter
        aws_hwprofile.nameFormat = '*'

        session.add(aws_hwprofile)

        # add hardware profile 'aws2' with non-default configuration
        # profile
        aws_hwprofile2 = HardwareProfile(
            name='aws2',
            location='remote',
            resourceadapter=aws_adapter,
            default_resource_adapter_config=aws_adapter_cfg2,
            nameFormat='*',
        )

        session.add(aws_hwprofile2)

        # create 'compute' software profile
        compute_swprofile = SoftwareProfile(name='compute')
        compute_swprofile.os = os_
        compute_swprofile.components = [core_component]
        compute_swprofile.type = 'compute'

        # create 'compute2' software profile
        compute2_swprofile = SoftwareProfile(name='compute2',
                                             os=os_,
                                             components=[core_component],
                                             type='compute')

        # map 'aws' and 'aws2' to 'compute'
        compute_swprofile.hardwareprofiles.extend(
            (aws_hwprofile, aws_hwprofile2))

        # create 'localiron' hardware profile
        localiron_hwprofile = HardwareProfile(
            name='localiron',
            nameFormat='compute-#NN',
            location='local',
        )
        localiron_hwprofile.resourceadapter = default_adapter
        localiron_hwprofile.mappedsoftwareprofiles = [
            compute_swprofile,
            compute2_swprofile,
        ]

        localiron_hwprofile.hardwareprofilenetworks.append(
            HardwareProfileNetwork(
                network=network,
                networkdevice=eth0_network_device,
            ))

        session.add(localiron_hwprofile)

        # create "localironalt" hardware profile with nameFormat set
        # to '*'
        localironalt_hwprofile = HardwareProfile(
            name='localironalt',
            nameFormat='*',
            location='local',
        )
        localironalt_hwprofile.resourceadapter = default_adapter
        localironalt_hwprofile.mappedsoftwareprofiles = [
            compute_swprofile,
            compute2_swprofile,
        ]

        localironalt_hwprofile.hardwareprofilenetworks.append(
            HardwareProfileNetwork(
                network=network,
                networkdevice=eth0_network_device,
            ))

        # create 'nonetwork' hardware profile
        nonetwork_hwprofile = HardwareProfile(name='nonetwork')
        nonetwork_hwprofile.resourceadapter = default_adapter
        nonetwork_hwprofile.mappedsoftwareprofiles.append(
            compute_swprofile)

        # create compute (compute-01, compute-02, ...) nodes
        for n in range(1, 11):
            compute_node = Node(name='compute-{0:02d}.private'.format(n),
                                state='Installed')
            compute_node.addHostSession = '1234'
            compute_node.softwareprofile = compute_swprofile
            compute_node.hardwareprofile = localiron_hwprofile
            compute_node.nics.append(
                Nic(ip='10.2.0.{}'.format(100 + n),
                    mac='FF:00:00:00:00:00:{:02x}'.format(100 + n),
                    boot=True,
                    network=network,
                    networkdevice=eth0_network_device))

            if n in (1, 2):
                # compute-01 and compute-02 have all tags
                for tag in all_tags:
                    compute_node.tags.append(
                        NodeTag(name=tag['name'], value=tag['value']))
            elif n in (3, 4):
                # compute-03 and compute-04 have 'tag1' and 'tag2'
                compute_node.tags.append(
                    NodeTag(name=all_tags[0]['name'],
                            value=all_tags[0]['value']))
                compute_node.tags.append(
                    NodeTag(name=all_tags[1]['name'],
                            value=all_tags[1]['value']))
            elif n in (5, 6):
                # compute-05 and compute-06 have 'tag2' and 'tag3'
                compute_node.tags.append(
                    NodeTag(name=all_tags[1]['name'],
                            value=all_tags[1]['value']))
                compute_node.tags.append(
                    NodeTag(name=all_tags[2]['name'],
                            value=all_tags[2]['value']))
            elif n == 7:
                # compute-07 has 'tag4'
                compute_node.tags.append(
                    NodeTag(name=all_tags[3]['name'],
                            value=all_tags[3]['value']))
            elif n == 8:
                # compute-08 has 'tag5'
                compute_node.tags.append(
                    NodeTag(name=all_tags[4]['name'],
                            value=all_tags[4]['value']))

            session.add(compute_node)

        # create arbitrary hardware profiles
        hwprofile1 = HardwareProfile(
            name='profile1',
            nameFormat='*',
            tags=[
                HardwareProfileTag(name=all_tags[0]['name'],
                                   value=all_tags[0]['value'])
            ])
        hwprofile2 = HardwareProfile(
            name='profile2',
            nameFormat='*',
            tags=[
                HardwareProfileTag(name=all_tags[1]['name'],
                                   value=all_tags[1]['value'])
            ])
        hwprofile_notags = HardwareProfile(name='notags', nameFormat='*')

        session.add(hwprofile1)
        session.add(hwprofile2)
        session.add(hwprofile_notags)

        # create arbitrary software profiles
        swprofile1 = SoftwareProfile(
            name='swprofile1',
            os=os_,
            type='compute',
            tags=[
                SoftwareProfileTag(name=all_tags[0]['name'],
                                   value=all_tags[0]['value'])
            ])
        swprofile2 = SoftwareProfile(
            name='swprofile2',
            os=os_,
            type='compute',
            tags=[
                SoftwareProfileTag(name=all_tags[1]['name'],
                                   value=all_tags[1]['value'])
            ])
        swprofile_notags = SoftwareProfile(name='notags', os=os_,
                                           type='compute')

        session.add(swprofile1)
        session.add(swprofile2)
        session.add(swprofile_notags)

        session.commit()

    return dbmgr
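# Usage sketch (hedged): assuming the 'dbm' fixture above is registered
# with @pytest.fixture, a test can consume the primed in-memory database
# like this. The test name and assertions are illustrative, not part of
# the original suite.
def test_compute_nodes_have_tags(dbm):
    with dbm.session() as session:
        node = session.query(Node).filter(
            Node.name == 'compute-01.private').one()

        # the fixture creates compute-01 in the 'Installed' state with
        # all five sample tags attached
        assert node.state == 'Installed'
        assert len(node.tags) == 5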
def getProvisioningInfo(self, nodeName):
    """
    Get the provisioning information for a given provisioned address

    Returns:
        [provisioningInformation structure]
    Throws:
        NodeNotFound
        DbError
    """
    session = DbManager().openSession()

    try:
        provisioningInfo = ProvisioningInfo()

        dbNode = self._nodesDbHandler.getNode(session, nodeName)

        if dbNode.softwareprofile:
            self.loadRelations(dbNode.softwareprofile, {
                'partitions': True,
                'packages': True,
            })

            for component in dbNode.softwareprofile.components:
                self.loadRelations(
                    component, {
                        'kit': True,
                        'os': True,
                        'family': True,
                        'os_components': True,
                        'osfamily_components': True,
                    })

        self.loadRelation(dbNode, 'hardwareprofile')

        provisioningInfo.setNode(Node.getFromDbDict(dbNode.__dict__))

        globalParameters = self._globalParameterDbApi.getParameterList()

        # TODO: this is a terrible hack until something better comes
        # along.
        p = Parameter()
        p.setName('Installer')

        hostName = socket.gethostname().split('.', 1)[0]

        if '.' in dbNode.name:
            nodeDomain = dbNode.name.split('.', 1)[1]

            priInstaller = hostName + '.%s' % (nodeDomain)
        else:
            priInstaller = hostName

        p.setValue(priInstaller)

        globalParameters.append(p)

        provisioningInfo.setGlobalParameters(globalParameters)

        return provisioningInfo
    except TortugaException:
        raise
    except Exception as ex:
        self.getLogger().exception('%s' % ex)
        raise
    finally:
        DbManager().closeSession()
def _run_installer(self, db_manager: DbManager, installer):
    """
    Runs the installation process for a kit installer.

    :param db_manager: the DbManager instance
    :param installer:  the KitInstaller instance to run the install
                       process for
    """
    kit = installer.get_kit()

    #
    # This method will throw KitAlreadyExists, if it does...
    #
    self._check_if_kit_exists(installer.session, kit)

    #
    # Validate EULA
    #
    eula = installer.get_eula()
    if not eula:
        self._logger.debug('No EULA acceptance required')
    else:
        if not self._eula_validator.validate_eula(eula):
            raise EulaAcceptanceRequired(
                'You must accept the EULA to install this kit')

    #
    # Run the kit pre-install action
    #
    installer.run_action('pre_install')

    #
    # Get list of operating systems supported by this installer
    #
    os_info_list = [
        repo.getOsInfo() for repo in repoManager.getRepoList()
    ]

    #
    # Install operating system specific packages
    #
    self._install_os_packages(kit, os_info_list)

    #
    # Initialize any DB tables provided by the kit
    #
    db_manager.init_database()

    #
    # Add the kit to the database
    #
    self._kit_db_api.addKit(installer.session, kit)

    #
    # Clean up the kit archive directory
    #
    self._clean_kit_achiv_dir(kit, installer.install_path)

    #
    # Install puppet modules
    #
    installer.run_action('install_puppet_modules')

    #
    # Run post install
    #
    installer.run_action('post_install')

    if eula:
        ActionManager().logAction(
            'Kit [{}] installed and EULA accepted at [{}]'
            ' local machine time.'.format(installer.spec, time.ctime())
        )
    else:
        ActionManager().logAction(
            'Kit [{}] installed at [{}] local machine time.'.format(
                installer.spec, time.ctime())
        )
def deleteNode(self, nodespec):
    """
    Delete node by nodespec

    Raises:
        NodeNotFound
    """
    installer_hostname = socket.getfqdn().split('.', 1)[0]

    session = DbManager().openSession()

    try:
        nodes = []

        for node in self.__expand_nodespec(session, nodespec):
            if node.name.split('.', 1)[0] == installer_hostname:
                self.getLogger().info(
                    'Ignoring request to delete installer node'
                    ' ([{0}])'.format(node.name))

                continue

            nodes.append(node)

        if not nodes:
            raise NodeNotFound(
                'No nodes matching nodespec [%s]' % (nodespec))

        self.__preDeleteHost(nodes)

        nodeErrorDict = NodesDbHandler().deleteNode(session, nodes)

        # REALLY!?!? Convert a list of Nodes objects into a list of
        # node names so we can report the list back to the end-user.
        # This needs to be FIXED!
        result, nodes_deleted = self.__process_nodeErrorDict(nodeErrorDict)

        session.commit()

        # ============================================================
        # Perform actions *after* node deletion(s) have been committed
        # to database.
        # ============================================================

        self.__postDeleteHost(nodes_deleted)

        addHostSessions = set(
            [tmpnode['addHostSession'] for tmpnode in nodes_deleted])

        if addHostSessions:
            AddHostManager().delete_sessions(addHostSessions)

        bhm = osUtility.getOsObjectFactory().getOsBootHostManager()

        for nodeName in result['NodesDeleted']:
            # Remove the Puppet cert
            bhm.deletePuppetNodeCert(nodeName)

            bhm.nodeCleanup(nodeName)

            self.getLogger().info('Node [%s] deleted' % (nodeName))

        # Schedule a cluster update
        self.__scheduleUpdate()

        return result
    except TortugaException:
        session.rollback()
        raise
    except Exception:
        session.rollback()
        self.getLogger().exception('Exception in NodeManager.deleteNode()')
        raise
    finally:
        DbManager().closeSession()
class AWSSpotdAppClass:
    """
    App for checking on Spot Instance Requests and cleaning up requests
    that don't follow the proper lifecycle.
    """

    def __init__(self, logger,
                 polling_interval=SPOT_INSTANCE_POLLING_INTERVAL,
                 max_register_duration=SPOT_INSTANCE_REGISTER_MAX_WAIT,
                 dbm=None, one_time=False):
        self.logger = logger
        self.metadata_ws_api = MetadataWsApi()

        self.__one_time = one_time
        self.__polling_interval = polling_interval
        self.__max_register_duration = max_register_duration

        if dbm is None:
            self.__dbm = DbManager()
        else:
            self.__dbm = dbm

        self.__done = False
        self.__next_poll = 0
        self.__bad_requests = []
        self.__bad_request_lock = asyncio.Lock()

    def run(self):
        """
        Main body of the application. Read, and optionally continue to
        read, known spot requests and compare against known instances.
        """
        loop = asyncio.get_event_loop()
        queue = asyncio.Queue()

        max_tasks = 3

        poller = None
        tasks = []

        self.logger.debug('Creating %d worker tasks', max_tasks)

        try:
            # create worker coroutines
            tasks = [
                asyncio.ensure_future(self.__worker(f'worker-{i}', queue))
                for i in range(max_tasks)
            ]

            poller = asyncio.ensure_future(self.__poller(queue, tasks))

            loop.run_until_complete(poller)
        except KeyboardInterrupt:
            pass
        except Exception as ex:  # pylint: disable=broad-except
            self.logger.error('Error running event loop: %s', ex)
        finally:
            if not self.__done:
                self.__done = True

                if poller:
                    loop.run_until_complete(poller)

            self.logger.debug('Cancelling worker tasks')
            for task in tasks:
                task.cancel()

            self.logger.debug('Closing asyncio loop')
            self.__dbm.closeSession()

    async def __poller(self, queue, tasks) -> NoReturn:
        resource_adapter = get_api('AWS')

        while not self.__done:
            if self.__next_poll < time.time():
                self.logger.debug('Polling spot instance requests')

                results = self.metadata_ws_api.list()

                resource_adapter.session = self.__dbm.openSession()

                resource_adapter_cfgs = {}

                try:
                    for result in results:
                        if not result['key'].startswith('sir-'):
                            # ignore any unrelated entries
                            self.logger.debug(
                                'Ignoring metadata key [%s]',
                                result['key'])
                            continue

                        with await self.__bad_request_lock:
                            if result['key'] in self.__bad_requests:
                                self.logger.warning(
                                    'Invalid spot instance request [%s]'
                                    ' will not be queued', result['key'])
                                continue

                        # Parse the embedded value to get the
                        # corresponding resource adapter configuration
                        value = json.loads(result['value'])

                        # Store the request ID in the value. This is used
                        # by the handlers later on.
                        value['spot_instance_request_id'] = result['key']

                        resource_adapter_cfg = value[
                            'resource_adapter_configuration']

                        # Cache resource adapter configurations. First
                        # check if this one has been cached.
                        if resource_adapter_cfg not in \
                                resource_adapter_cfgs:
                            self.logger.debug(
                                'Loading adapter configuration: [%s]',
                                resource_adapter_cfg)

                            # Get the configuration for the spot instance
                            # request
                            config = resource_adapter.get_config(
                                resource_adapter_cfg)

                            # Save the fields that we may need for other
                            # requests in this loop
                            resource_adapter_cfgs[resource_adapter_cfg] = {
                                'cfg': config,
                                'ec2_conn':
                                    resource_adapter.getEC2Connection(
                                        config),
                                'max_register_duration': config.get(
                                    'spot_provision_timeout',
                                    self.__max_register_duration),
                            }

                        # Update the record to queue to have the
                        # appropriate cached data for the request.
                        result['value'] = value
                        result['ec2_conn'] = resource_adapter_cfgs[
                            resource_adapter_cfg]['ec2_conn']
                        result['max_register_duration'] = \
                            resource_adapter_cfgs[resource_adapter_cfg][
                                'max_register_duration']

                        # enqueue spot instance request
                        queue.put_nowait(result)
                except Exception as ex:  # pylint: disable=broad-except
                    self.logger.error(
                        'Unable to poll spot instance requests: %s', ex)
                finally:
                    resource_adapter.session.close()

                if self.__one_time:
                    await queue.join()

                    self.__done = True

                    break

                self.logger.debug('Sleeping for %ds',
                                  self.__polling_interval)

                self.__next_poll = time.time() + self.__polling_interval

            await asyncio.sleep(1)

        try:
            for task in tasks:
                await asyncio.wait_for(task, timeout=30.0)

            self.logger.debug('Exiting poller')
        except Exception as ex:  # pylint: disable=broad-except
            self.logger.error('Unable to wait for worker tasks: %s', ex)

    async def __worker(self, name: str, queue: asyncio.Queue) -> NoReturn:
        self.logger.debug('Worker [%s] initializing...', name)

        while not self.__done:
            try:
                # Allow other pending co-routines to run
                # await asyncio.sleep(0.0)
                item = queue.get_nowait()
            except asyncio.QueueEmpty:
                await asyncio.sleep(0.01)
                continue

            session = self.__dbm.openSession()

            try:
                # Unpack the queued request
                sir_id = item['key']
                spot_instance_request = item['value']
                instance = item['instance']
                ec2_conn = item['ec2_conn']
                max_register_duration = item['max_register_duration']

                node = None

                # Attempt to fetch the node matching the instance in the
                # spot request.
                if instance:
                    if 'id' in instance:
                        try:
                            node = self.__get_node_by_instance(
                                session, instance['instance'])
                        except Exception as ex:  # pylint: disable=broad-except
                            self.logger.debug(
                                'Unable to fetch node: %s', ex)

                self.logger.info(
                    'Worker [%s] processing spot instance request'
                    ' id [%s]', name, sir_id,
                )

                with await self.__bad_request_lock:
                    if sir_id in self.__bad_requests:
                        self.logger.warning(
                            'Ignoring invalid spot instance request:'
                            ' [%s]', sir_id,
                        )
                        continue

                try:
                    await self.process_spot_instance_request(
                        ec2_conn,
                        session,
                        node,
                        spot_instance_request,
                        max_register_duration,
                    )
                except Exception:  # pylint: disable=broad-except
                    self.logger.exception(
                        'Error processing spot instance request [%s]',
                        spot_instance_request,
                    )
            finally:
                session.close()

                queue.task_done()

        self.logger.debug('Exiting worker')

    async def process_spot_instance_request(
            self,
            ec2_conn: EC2Connection,
            session: Session,
            instance: dict,
            spot_instance_request: dict,
            max_register_duration: float,
    ) -> None:
        """
        :raises EC2ResponseError:
        """
        sir_id = spot_instance_request.get('spot_instance_request_id')
        if sir_id is None:
            with await self.__bad_request_lock:
                self.__bad_requests.append(sir_id)

            return

        if instance and instance.state == 'Installed':
            self.logger.debug(
                'Installed node [%s] already associated with spot'
                ' instance request [%s]', instance.name, sir_id)

            return

        try:
            result = ec2_conn.get_all_spot_instance_requests(
                request_ids=[sir_id],
            )
        except boto.exception.EC2ResponseError as exc:
            if exc.status == 400 and \
                    exc.error_code in (
                        'InvalidSpotInstanceRequestID.NotFound',
                    ):
                spot_instance_request['status'] = 'notfound'

            raise

        create_time = dateutil.parser.isoparse(result[0].create_time)

        self.logger.debug(
            'sir: [%s], state: [%s], status code: [%s], created at: [%s]',
            sir_id, result[0].state, result[0].status.code, create_time,
        )

        jump_table = {
            'active': self.__handle_active_spot_requests,
            'open': self.__handle_open_spot_requests,
            'closed': self.__handle_closed_spot_requests,
            'cancelled': self.__handle_cancelled_spot_requests,
            'failed': self.__handle_failed_spot_requests,
        }

        handler = jump_table.get(result[0].state)
        if handler is None:
            self.logger.error(
                'Ignoring unknown spot instance request state: [%s]',
                result[0].state)

            return

        self.logger.debug('Calling handler for state: [%s]',
                          result[0].state)

        await handler(
            result[0].status.code,
            sir_id,
            ec2_conn,
            result[0].instance_id,
            instance,
            spot_instance_request,
            create_time,
            session,
            max_register_duration,
        )

    async def __handle_active_spot_requests(
            self, status_code, sir_id, ec2_conn,
            instance_id,
            instance,  # pylint: disable=unused-argument
            spot_instance_request, create_time,
            session,
            max_register_duration):
        if status_code != 'fulfilled':
            return

        self.logger.debug('Waiting for node for spot instance'
                          ' request [%s]', sir_id)

        await self.__fulfilled_request_handler(
            ec2_conn,
            session,
            instance_id,
            spot_instance_request,
            create_time,
            max_register_duration,
        )

    async def __handle_open_spot_requests(
            self, status_code, sir_id,
            ec2_conn,  # pylint: disable=unused-argument
            instance_id,  # pylint: disable=unused-argument
            instance,  # pylint: disable=unused-argument
            spot_instance_request,  # pylint: disable=unused-argument
            create_time,  # pylint: disable=unused-argument
            session,  # pylint: disable=unused-argument
            max_register_duration):  # pylint: disable=unused-argument
        """Handle open spot instance requests"""
        if status_code in ('pending-fulfillment', 'price-too-low'):
            return

        if status_code not in (
                'capacity-oversubscribed',
                'instance-terminated-by-price',
                'instance-terminated-no-capacity',
                'instance-terminated-capacity-oversubscribed',
                'instance-terminated-launch-group-constraint'):
            # unknown status code
            self.logger.warning(
                'Unrecognized open spot request status code: [%s]',
                status_code)

            return

        if status_code == 'capacity-oversubscribed':
            self.logger.info(
                'spot instance request [%s] not fulfilled due to'
                ' oversubscription; request will remain open', sir_id,
            )

            return

    async def __handle_closed_spot_requests(
            self, status_code, sir_id,
            ec2_conn,  # pylint: disable=unused-argument
            instance_id,
            instance,  # pylint: disable=unused-argument
            spot_instance_request,  # pylint: disable=unused-argument
            create_time,  # pylint: disable=unused-argument
            session,  # pylint: disable=unused-argument
            max_register_duration):  # pylint: disable=unused-argument
        if status_code == 'marked-for-termination':
            self.logger.info(
                'Instance [%s] marked for termination', instance_id,
            )

            return

        if status_code == 'system-error':
            self.logger.warning(
                'Reported AWS/EC2 system error for spot instance request'
                ' id [%s]', sir_id)

            return

        if status_code not in (
                'instance-terminated-by-user',
                'instance-terminated-by-price',
                'instance-terminated-no-capacity',
                'instance-terminated-capacity-oversubscribed',
                'instance-terminated-launch-group-constraint',
        ):
            # unknown status code
            self.logger.warning(
                'Unrecognized closed spot request status code: [%s]',
                status_code)

            return

        # Instance is terminated. We can remove the request.
        self.logger.info(
            'Deleting spot instance request id [%s] for terminated'
            ' instance', sir_id)

        self.metadata_ws_api.deleteMetadata(filter_key=sir_id)

    async def __handle_cancelled_spot_requests(
            self, status_code, sir_id, ec2_conn, instance_id, instance,
            spot_instance_request, create_time, session,
            max_register_duration):
        if status_code == 'canceled-before-fulfillment':
            # Never had an instance, so we must remove the request.
            self.logger.info(
                'Deleting spot instance request id [%s]', sir_id,
            )

            self.metadata_ws_api.deleteMetadata(filter_key=sir_id)

            return

        if status_code == 'request-canceled-and-instance-running':
            if instance is None:
                # Need to check if launch has instance registered.
                await self.__fulfilled_request_handler(
                    ec2_conn,
                    session,
                    instance_id,
                    spot_instance_request,
                    create_time,
                    max_register_duration,
                )

        if status_code in (
                'instance-terminated-by-user',
                'instance-terminated-capacity-oversubscribed',
        ):
            self.logger.info(
                'Deleting spot instance request id [%s] for terminated'
                ' instance [%s]', sir_id, instance_id)

            self.metadata_ws_api.deleteMetadata(filter_key=sir_id)

    async def __handle_failed_spot_requests(
            self, status_code, sir_id,
            ec2_conn,  # pylint: disable=unused-argument
            instance_id,  # pylint: disable=unused-argument
            instance,  # pylint: disable=unused-argument
            spot_instance_request,  # pylint: disable=unused-argument
            create_time,  # pylint: disable=unused-argument
            session,  # pylint: disable=unused-argument
            max_register_duration):  # pylint: disable=unused-argument
        # This request is dead in the water; nothing more can happen
        return

    async def __fulfilled_request_handler(
            self,
            ec2_conn: EC2Connection,
            session: Session,
            instance_id: str,
            spot_instance_request: dict,
            create_time: datetime.datetime,
            max_register_duration: float,
    ):
        """Called when processing valid spot instance request"""
        sir_id = spot_instance_request.get('spot_instance_request_id')

        node = self.__get_node_by_instance(session, instance_id)
        if not node or node.state != 'Installed':
            waiting_seconds = time.time() - create_time.timestamp()
            if waiting_seconds > max_register_duration:
                self.logger.warning(
                    'Terminating instance [%s] as it failed to register'
                    ' in [%d] second(s)', instance_id,
                    max_register_duration)

                ec2_conn.terminate_instances(instance_ids=[instance_id])
            else:
                self.logger.info(
                    'Unable to find instance in database: [%s], instance'
                    ' will be terminated in [%d] second(s) if it fails to'
                    ' register.', instance_id,
                    max_register_duration - waiting_seconds)

            return None

        result = self.__get_spot_instance_metadata(session, sir_id)
        if not result:
            self.logger.error(
                'Unable to find matching spot instance request: %s',
                sir_id,
            )

            return None

        self.logger.info(
            'Matching spot instance request [%s] to instance id [%s]',
            sir_id, instance_id)

        node.instance.instance_metadata.append(result)

        session.commit()

    def __get_spot_instance_metadata(
            self, session: Session,
            sir_id: str) -> Optional[InstanceMetadata]:
        try:
            return session.query(InstanceMetadata).filter(
                InstanceMetadata.key == sir_id).one()  # noqa
        except NoResultFound:
            pass

        return None

    def __get_node_by_instance(self, session: Session,
                               instance_id: str) -> Optional[Node]:
        try:
            return session.query(InstanceMapping).filter(
                InstanceMapping.instance == instance_id  # noqa
            ).one().node
        except NoResultFound:
            pass

        return None
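# Usage sketch (hedged): wiring the daemon class above into a one-shot
# run. The logger name and the basicConfig call are illustrative; the
# polling and registration timeouts fall back to the module defaults
# referenced in __init__.
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('tortuga.aws.spotd')

app = AWSSpotdAppClass(logger, one_time=True)
app.run()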
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pprint

from tortuga.db.dbManager import DbManager
from tortuga.db.hardwareProfilesDbHandler import HardwareProfilesDbHandler
from tortuga.db.softwareProfilesDbHandler import SoftwareProfilesDbHandler
from tortuga.resourceAdapter.vmware_adapter import Vmware_adapter

session = DbManager().openSession()

dbSoftwareProfile = SoftwareProfilesDbHandler().getSoftwareProfile(
    session, 'BasicCompute')

dbHardwareProfile = HardwareProfilesDbHandler().getHardwareProfile(
    session, 'vmware2')

adapter = Vmware_adapter()

config_dict = adapter._getResourceAdapterConfig(dbHardwareProfile,
                                                dbSoftwareProfile)

pprint.pprint(config_dict)
def on_init(self):
    TortugaCeleryApp.app = Application()
    TortugaCeleryApp.dbm = DbManager()
def dbm():
    dbmgr = DbManager(create_engine('sqlite:///:memory:', echo=False))

    dbmgr.init_database()

    rhel7_os_family_info = osFamilyInfo.OsFamilyInfo('rhel', '7', 'x86_64')

    os_info = osInfo.OsInfo('centos', '7.4', 'x86_64')
    os_info.setOsFamilyInfo(rhel7_os_family_info)

    installer_fqdn = getfqdn()

    settings = {
        'language': 'en',
        'keyboard': 'en_US',
        'timezone': 'UTC',
        'utc': 'true',
        'intWebPort': '8008',
        'intWebServicePort': '8444',
        'adminPort': '8443',
        'eulaAccepted': 'true',
        'depotpath': '/opt/tortuga/depot',
        'osInfo': os_info,
        'fqdn': installer_fqdn,
        'installer_software_profile': 'Installer',
        'installer_hardware_profile': 'Installer',
    }

    with dbmgr.session() as session:
        primeDb(session, settings)

        init_global_parameters(session, settings)

        # create sample tags
        all_tags = [{
            'name': 'tag{:d}'.format(idx),
            'value': 'value{:d}'.format(idx),
        } for idx in range(1, 5 + 1)]

        installer_node = session.query(Node).filter(
            Node.name == installer_fqdn).one()

        os_ = session.query(OperatingSystem).filter(
            OperatingSystem.name == 'centos').one()

        rhel7_os_family = session.query(OperatingSystemFamily).filter(
            OperatingSystemFamily.name == 'rhel').one()

        # add add'l operating system/family
        rhel75_os = OperatingSystem(name='rhel', version='7.5',
                                    arch='x86_64')
        rhel75_os.family = rhel7_os_family

        session.add(rhel75_os)

        admin = Admin(username='******',
                      password=pbkdf2_sha256.hash('password'),
                      realname='realname',
                      description='description')
        session.add(admin)

        eth1_network_device = NetworkDevice(name='eth1')

        # Add dummy provisioning network
        network = Network()
        network.address = '10.2.0.0'
        network.netmask = '255.255.255.0'
        network.name = 'Provisioning network on eth1'
        network.type = 'provision'

        session.add(network)

        # create 'hardwareprofilenetwork' entry
        hwpn1 = HardwareProfileNetwork(
            hardwareprofile=installer_node.hardwareprofile,
            network=network,
            networkdevice=eth1_network_device)

        # create nic on installer
        installer_nic = Nic()
        installer_nic.ip = '10.2.0.1'
        installer_nic.network = network
        installer_nic.networkdevice = eth1_network_device

        installer_node.nics = [installer_nic]

        # create 'base' kit
        kit = Kit()
        kit.name = 'base'
        kit.version = '7.1.1'
        kit.iteration = '0'
        kit.description = 'Sample base kit'

        installer_component = Component(name='installer', version='7.0')
        installer_component.family = [rhel7_os_family]
        installer_component.kit = kit

        core_component = Component(name='core', version='7.0',
                                   description='Compute component')
        core_component.family = [rhel7_os_family]
        core_component.kit = kit

        session.add(kit)

        # create OS kit
        os_kit = Kit(name='centos', version='7.4', iteration='0')
        os_kit.isOs = True
        os_component = Component(name='centos-7.4-x86_64', version='7.4')
        os_component.os = [os_]
        os_component.kit = os_kit
        os_kit.components.append(os_component)

        session.add(os_kit)

        # create resource adapter kit
        ra_kit = Kit(name='awsadapter', version='0.0.1', iteration='0')
        ra_component = Component(name='management', version='0.0.1')
        ra_component.family.append(rhel7_os_family)
        ra_kit.components.append(ra_component)

        installer_node.softwareprofile.components.append(ra_component)
        installer_node.softwareprofile.components.append(
            installer_component)

        session.commit()

        # create 'default' resource adapter
        default_adapter = ResourceAdapter(
            name=DEFAULT_CONFIGURATION_PROFILE_NAME,
            kit=kit,
        )

        # create resource adapter
        aws_adapter = ResourceAdapter(name='AWS')
        aws_adapter.kit = ra_kit

        aws_adapter_cfg = ResourceAdapterConfig(
            name=DEFAULT_CONFIGURATION_PROFILE_NAME,
            description='Example default resource adapter configuration')

        with mock_ec2_deprecated():
            ec2_conn = boto.ec2.connect_to_region('us-east-1')
            amis = ec2_conn.get_all_images()

        aws_adapter_cfg.configuration.append(
            ResourceAdapterSetting(key='ami', value=amis[0].id))

        aws_adapter.resource_adapter_config.append(aws_adapter_cfg)

        # add second resource adapter configuration
        aws_adapter_cfg2 = ResourceAdapterConfig(name='nondefault',
                                                 admin=admin)
        aws_adapter_cfg2.configuration.append(
            ResourceAdapterSetting(key='another_key',
                                   value='another_value'))

        session.add(aws_adapter)

        # create 'AWS' hardware profile
        aws_hwprofile = HardwareProfile(name='AWS')
        aws_hwprofile.location = 'remote'
        aws_hwprofile.resourceadapter = aws_adapter

        session.add(aws_hwprofile)

        aws_hwprofile2 = HardwareProfile(name='aws2',
                                         location='remote',
                                         resourceadapter=aws_adapter,
                                         nameFormat='*')

        session.add(aws_hwprofile2)

        # create 'compute' software profile
        compute_swprofile = SoftwareProfile(name='compute')
        compute_swprofile.os = os_
        compute_swprofile.components = [core_component]
        compute_swprofile.type = 'compute'

        # create 'compute2' software profile
        compute2_swprofile = SoftwareProfile(name='compute2',
                                             os=os_,
                                             components=[core_component],
                                             type='compute')

        # map 'AWS' to 'compute'
        aws_hwprofile.mappedsoftwareprofiles.append(compute_swprofile)
        aws_hwprofile2.mappedsoftwareprofiles.append(compute_swprofile)

        # create 'localiron' hardware profile
        localiron_hwprofile = HardwareProfile(name='localiron',
                                              nameFormat='compute-#NN')
        localiron_hwprofile.resourceadapter = default_adapter
        localiron_hwprofile.mappedsoftwareprofiles.append(
            compute_swprofile)
        localiron_hwprofile.mappedsoftwareprofiles.append(
            compute2_swprofile)

        localiron_hwprofile.hardwareprofilenetworks.append(hwpn1)

        # create 'nonetwork' hardware profile
        nonetwork_hwprofile = HardwareProfile(name='nonetwork')
        nonetwork_hwprofile.resourceadapter = default_adapter
        nonetwork_hwprofile.mappedsoftwareprofiles.append(
            compute_swprofile)

        eth0_networkdevice = NetworkDevice(name='eth0')

        # create compute (compute-01, compute-02, ...) nodes
        for n in range(1, 11):
            compute_node = Node(name='compute-{0:02d}.private'.format(n),
                                state='Installed')
            compute_node.addHostSession = '1234'
            compute_node.softwareprofile = compute_swprofile
            compute_node.hardwareprofile = localiron_hwprofile
            compute_node.nics.append(
                Nic(ip='10.2.0.{}'.format(100 + n),
                    mac='FF:00:00:00:00:00:{:02x}'.format(100 + n),
                    boot=True,
                    network=network,
                    networkdevice=eth0_networkdevice))

            if n in (1, 2):
                # compute-01 and compute-02 have all tags
                compute_node.tags = [
                    NodeTag(
                        name=tag['name'],
                        value=tag['value'],
                    ) for tag in all_tags
                ]
            elif n in (3, 4):
                # compute-03 and compute-04 have 'tag1' and 'tag2'
                compute_node.tags = [
                    NodeTag(
                        name=all_tags[0]['name'],
                        value=all_tags[0]['value'],
                    ),
                    NodeTag(
                        name=all_tags[1]['name'],
                        value=all_tags[1]['value'],
                    ),
                ]
            elif n in (5, 6):
                # compute-05 and compute-06 have 'tag2' and 'tag3'
                compute_node.tags = [
                    NodeTag(
                        name=all_tags[1]['name'],
                        value=all_tags[1]['value'],
                    ),
                    NodeTag(
                        name=all_tags[2]['name'],
                        value=all_tags[2]['value'],
                    ),
                ]
            elif n == 7:
                # compute-07 has 'tag4'
                compute_node.tags = [
                    NodeTag(
                        name=all_tags[3]['name'],
                        value=all_tags[3]['value'],
                    ),
                ]
            elif n == 8:
                # compute-08 has 'tag5'
                compute_node.tags = [
                    NodeTag(
                        name=all_tags[4]['name'],
                        value=all_tags[4]['value'],
                    ),
                ]

            session.add(compute_node)

        # create arbitrary aws nodes
        for idx in range(1, 10):
            new_node = Node(
                name='ip-10-10-10-{:0d}.ec2.internal'.format(idx),
                hardwareprofile=aws_hwprofile,
                softwareprofile=compute_swprofile,
            )
            new_node.instance = InstanceMapping(
                instance='i-{:08x}'.format(idx))
            new_node.instance.resource_adapter_configuration = \
                aws_adapter_cfg

            session.add(new_node)

        # create arbitrary hardware profiles
        hwprofile1 = HardwareProfile(
            name='profile1',
            tags=[
                HardwareProfileTag(
                    name=all_tags[0]['name'],
                    value=all_tags[0]['value'],
                ),
            ],
        )
        hwprofile2 = HardwareProfile(
            name='profile2',
            tags=[
                HardwareProfileTag(
                    name=all_tags[1]['name'],
                    value=all_tags[1]['value'],
                ),
            ],
        )

        session.add(hwprofile1)
        session.add(hwprofile2)

        # create arbitrary software profiles
        SoftwareProfile(
            name='swprofile1',
            os=os_,
            type='compute',
            tags=[
                SoftwareProfileTag(name=all_tags[0]['name'],
                                   value=all_tags[0]['value']),
            ],
        )
        SoftwareProfile(
            name='swprofile2',
            os=os_,
            type='compute',
            tags=[
                SoftwareProfileTag(
                    name=all_tags[1]['name'],
                    value=all_tags[1]['value'],
                ),
            ],
        )

        session.commit()

    return dbmgr
def addHardwareProfile(self, hardwareProfile, session=None):
    """
    Insert hardwareProfile into the db.

    Returns:
        (none)
    Throws:
        HardwareProfileAlreadyExists
        DbError
    """
    # Keep local 'session' instance. If 'session' is None, ensure
    # transaction is committed before returning to the caller,
    # otherwise the caller is responsible. On exceptions, the
    # rollback is performed regardless.
    _session = session

    if _session is None:
        _session = DbManager().openSession()

    try:
        try:
            self._hardwareProfilesDbHandler.getHardwareProfile(
                _session, hardwareProfile.getName())

            raise HardwareProfileAlreadyExists(
                'Hardware profile [%s] already exists' % (hardwareProfile))
        except HardwareProfileNotFound:
            pass

        dbHardwareProfile = self.__populateHardwareProfile(
            _session, hardwareProfile)

        _session.add(dbHardwareProfile)

        if session is None:
            _session.commit()

        self.getLogger().info(
            'Added hardware profile [%s]' % (dbHardwareProfile.name))
    except TortugaException:
        _session.rollback()
        raise
    except Exception as ex:
        _session.rollback()
        self.getLogger().exception('%s' % ex)
        raise
    finally:
        if session is None:
            DbManager().closeSession()
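# Usage sketch (hedged) for the session-ownership convention above: with
# no session argument the API opens, commits, and closes its own session;
# with a caller-supplied session the caller controls the transaction.
# 'hw_profile_api' and 'profile' are illustrative names.

# standalone call: the API manages its own transaction
hw_profile_api.addHardwareProfile(profile)

# caller-managed transaction: commit (or roll back) several operations
# atomically on one session
session = DbManager().openSession()
try:
    hw_profile_api.addHardwareProfile(profile, session=session)
    # ... further operations on the same session ...
    session.commit()
except Exception:
    session.rollback()
    raise
finally:
    DbManager().closeSession()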
def runCommand(self):
    args = self.parseArgs()

    with DbManager().session() as session:
        args.func(session, args)
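# Usage sketch (hedged): the 'args.func' dispatch above assumes each
# argparse subparser registered a 'func' default, along these lines;
# 'list_command(session, args)' is an illustrative handler, not part of
# the original source.
#
#   subparsers = parser.add_subparsers()
#   list_parser = subparsers.add_parser('list')
#   list_parser.set_defaults(func=list_command)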
def deleteHardwareProfile(self, name):
    """
    Delete hardwareProfile from the db.

    Returns:
        None
    Throws:
        HardwareProfileNotFound
        DbError
        TortugaException
    """
    session = DbManager().openSession()

    try:
        hwProfile = self._hardwareProfilesDbHandler.getHardwareProfile(
            session, name)

        if hwProfile.nodes:
            raise TortugaException(
                'Unable to remove hardware profile with associated'
                ' nodes')

        # First delete the mappings
        hwProfile.mappedsoftwareprofiles = []

        self.getLogger().debug(
            'Marking hardware profile [%s] for deletion' % (name))

        session.delete(hwProfile)

        session.commit()
    except TortugaException:
        session.rollback()
        raise
    except Exception as ex:
        session.rollback()
        self.getLogger().exception('%s' % ex)
        raise
    finally:
        DbManager().closeSession()