def generate_config(self):
    """
    Render every Jinja2 template under templates/ into the Tortuga
    'rules' directory, substituting burst/queue settings taken from the
    command-line options.

    Exits the process when the rules directory already exists and
    --force was not given, or when the UGE environment (SGE_ROOT /
    SGE_CELL) is not sourced.
    """
    config_mgr = ConfigManager()
    rules_dir = os.path.join(config_mgr.getRoot(), 'rules')

    if os.path.exists(rules_dir):
        # Refuse to clobber an existing rules directory unless --force given
        if not self.getOptions().force:
            sys.stderr.write('Script directory \"{0}\" already exists.\n'
                             'Use --force to overwrite current'
                             ' scripts\n'.format(rules_dir))
            sys.exit(1)

        print('Overwriting any scripts in directory \"{0}\"'.format(
            rules_dir))
    else:
        print('Creating rules directory \"{0}\"'.format(rules_dir))
        os.makedirs(rules_dir)

    # Determine UGE cell directory from environment
    sge_root = os.getenv('SGE_ROOT')
    sge_cell = os.getenv('SGE_CELL')
    if not sge_root or not sge_cell:
        print('Error: UGE environment is not sourced', file=sys.stderr)
        sys.exit(1)

    cell_dir = os.path.join(sge_root, sge_cell)

    options = self.getOptions()
    template_vars = {
        'tortuga_root': config_mgr.getRoot(),
        'uge_cell_dir': cell_dir,
        'script_dir': rules_dir,
        'burst_swprofile': options.software_profile,
        'burst_hwprofile': options.hardware_profile,
        'burst_queue': 'burst.q',
        'polling_interval': options.polling_interval,
        'slots_per_host': options.slots_per_host,
    }

    # StrictUndefined turns a missing template variable into an error
    # instead of silently rendering an empty string
    env = Environment(loader=FileSystemLoader('templates'),
                      undefined=StrictUndefined)

    for template_path in glob.glob('templates/*.j2'):
        template_name = os.path.basename(template_path)
        template = env.get_template(template_name)

        # Destination file name is the template name minus its '.j2' suffix
        dest_path = os.path.join(
            rules_dir, os.path.splitext(template_name)[0])

        print(' - writing {0}'.format(os.path.basename(dest_path)))

        with open(dest_path, 'w') as outfp:
            template.stream(template_vars).dump(outfp)
def openid_client_config_path():
    """
    The path to the OpenID Connect Client configuration.

    This is a JSON file with the following three settings:

        {
            "issuer": "http://example.com:123456/dex",
            "client_id": "client-id-1234",
            "client_secret": "AbcDef134..."
        }

    If this file does not exist, JWT authentication will be disabled.
    """
    root_dir = ConfigManager().getRoot()

    return os.path.join(root_dir, 'etc',
                        JwtAuthenticationMethod.OPENID_CONNECT_CONFIG)
def getApi(moduleName):
    """
    Dynamically load a SAN resource-adapter module and return an
    instance of its API class.

    The class name is derived from the module name by capitalizing its
    first character (module 'foo' -> class 'Foo').

    :param moduleName: name of the module to load from
        $TORTUGA_ROOT/lib/tortuga/resourceAdapter/san
    :return: a new instance of the module's API class
    :raises Exception: re-raises any failure to import the module,
        resolve the class, or instantiate it (after logging the error)
    """
    logger = logging.getLogger(__name__)
    logger.addHandler(logging.NullHandler())

    # Add the module's directory to PYTHONPATH
    cm = ConfigManager()
    moduleDir = '%s/lib/tortuga/resourceAdapter/san' % cm.getRoot()

    import sys
    sys.path.insert(0, moduleDir)

    # BUGFIX: the original only removed moduleDir from sys.path on the
    # success path; every failure raised with the entry still present,
    # leaking one sys.path entry per failed call. The try/finally below
    # guarantees cleanup on all paths.
    try:
        # Import the module
        try:
            mod = __import__(moduleName)
        except Exception as msg:
            logger.error(
                "Can't import module [%s]; %s" % (moduleName, msg))
            raise

        # Create the class
        className = moduleName[0].upper() + moduleName[1:]
        try:
            klass = getattr(mod, className)
        except Exception as msg:
            logger.error(
                "Can't create class [%s] from module [%s]; %s" % (
                    className, moduleName, msg))
            raise

        # Instantiate an object of the class
        try:
            obj = klass()
        except Exception as msg:
            logger.error(
                "Can't create object of class [%s] from module [%s]; %s" % (
                    className, moduleName, msg))
            raise
    finally:
        # Undo changes to PYTHONPATH; remove the exact entry we inserted
        # rather than blindly deleting index 0
        sys.path.remove(moduleDir)

    return obj
class SoftwareProfileManager(TortugaObjectManager): \
        # pylint: disable=too-many-public-methods
    """
    Business-logic layer for software profiles: CRUD operations,
    component enable/disable, hardware-profile mappings, and kit
    metadata aggregation. Delegates persistence to the *DbApi classes.
    """

    # Name of the kit that provides the mandatory 'core' component
    BASE_KIT_NAME = 'base'

    def __init__(self):
        super(SoftwareProfileManager, self).__init__()
        # Database accessors used throughout this manager
        self._sp_db_api = SoftwareProfileDbApi()
        self._node_db_api = NodeDbApi()
        self._component_db_api = ComponentDbApi()
        self._global_param_db_api = GlobalParameterDbApi()
        self._kit_db_api = KitDbApi()
        self._config_manager = ConfigManager()
        self._logger = logging.getLogger(SOFTWARE_PROFILE_NAMESPACE)

    def getSoftwareProfileList(self, session: Session, tags=None):
        """Return all of the softwareprofiles with referenced components
        in this softwareprofile

        :param session: a SQLAlchemy database session
        :param tags: optional tag filter passed through to the DB layer
        """
        results = self._sp_db_api.getSoftwareProfileList(session,
                                                         tags=tags)

        for software_profile_obj in results:
            # load any available software profile metadata
            software_profile_obj.setMetadata(
                self.get_software_profile_metadata(
                    session, software_profile_obj.getName()))

        return results

    def addAdmin(self, session: Session, softwareProfileName,
                 adminUsername):
        """
        Add an admin as an authorized user.

            Returns:
                None
            Throws:
                TortugaException
                AdminNotFound
                SoftwareProfileNotFound
        """
        return self._sp_db_api.addAdmin(session, softwareProfileName,
                                        adminUsername)

    def deleteAdmin(self, session: Session, softwareProfileName,
                    adminUsername):
        """
        Remove an admin as an authorized user.

            Returns:
                None
            Throws:
                TortugaException
                AdminNotFound
                SoftwareProfileNotFound
        """
        return self._sp_db_api.deleteAdmin(session, softwareProfileName,
                                           adminUsername)

    def updateSoftwareProfile(self, session: Session,
                              softwareProfileObject):
        """
        Persist changes to an existing software profile, preserving its
        immutable fields, and fire a tags-changed event if the tags
        differ from the stored version.
        """
        self._logger.debug('Updating software profile: %s' % (
            softwareProfileObject.getName()))

        #
        # First get the object from the db we are updating
        #
        existing_swp = self.getSoftwareProfileById(
            session, softwareProfileObject.getId())

        #
        # Set parameters that we will not allow updating
        #
        softwareProfileObject.setOsInfo(existing_swp.getOsInfo())
        softwareProfileObject.setOsId(existing_swp.getOsId())
        softwareProfileObject.setType(existing_swp.getType())

        #
        # Do the DB update
        #
        self._sp_db_api.updateSoftwareProfile(session,
                                              softwareProfileObject)

        #
        # Get the new version
        #
        new_swp = self.getSoftwareProfileById(session,
                                              softwareProfileObject.getId())

        #
        # If the tags have changed, fire the tags changed event
        #
        if existing_swp.getTags() != new_swp.getTags():
            SoftwareProfileTagsChanged.fire(
                softwareprofile_id=str(new_swp.getId()),
                softwareprofile_name=new_swp.getName(),
                tags=new_swp.getTags(),
                previous_tags=existing_swp.getTags())

    def getSoftwareProfile(
            self, session: Session, name: str,
            optionDict: Optional[Dict[str, bool]] = None
            ) -> SoftwareProfile:
        """
        Retrieve software profile by name
        """
        software_profile_obj: SoftwareProfile = \
            self._sp_db_api.getSoftwareProfile(
                session, name, optionDict=optionDict)

        # load any available software profile metadata
        software_profile_obj.setMetadata(
            self.get_software_profile_metadata(session, name))

        return software_profile_obj

    def getSoftwareProfileById(
            self, session: Session, id_: int,
            optionDict: Optional[Dict[str, bool]] = None
            ) -> SoftwareProfile:
        """
        Retrieve software profile by id
        """
        software_profile_obj: SoftwareProfile = \
            self._sp_db_api.getSoftwareProfileById(
                session, id_, optionDict=optionDict)

        # load any available software profile metadata
        software_profile_obj.setMetadata(
            self.get_software_profile_metadata(
                session, software_profile_obj.getName()))

        return software_profile_obj

    def _getCoreComponentForOsInfo(self, session: Session, osInfo):
        """
        Return the best-match 'core' component (from the 'base' kit)
        for the given OS.

        Raises:
            KitNotFound
            ComponentNotFound
        """
        # Find core component

        baseKit = None

        # for/else: 'else' runs only if the loop never hits 'break',
        # i.e. no kit named BASE_KIT_NAME was found
        for baseKit in self._kit_db_api.getKitList(session):
            if not baseKit.getName() == self.BASE_KIT_NAME:
                continue

            break
        else:
            raise KitNotFound('Kit [%s] not found.' % (self.BASE_KIT_NAME))

        baseComp = None

        # Same for/else pattern: find the 'core' component in the kit
        for baseComp in baseKit.getComponentList():
            if baseComp.getName() != 'core':
                continue

            break
        else:
            raise ComponentNotFound('Component [%s] not found in kit [%s]' % (
                'core', baseKit.getName()))

        comp = self._component_db_api.getBestMatchComponent(
            session, baseComp.getName(), baseComp.getVersion(), osInfo,
            baseKit.getId())

        comp.setKit(baseKit)

        return comp

    def _getOsInfo(self, session: Session, bOsMediaRequired: bool):
        """
        Determine the OS for a new software profile when the caller did
        not specify one.

        When OS media is not required, the installer node's own OS is
        used as a placeholder; otherwise exactly one installed OS kit
        must exist, and its component's single OsInfo is returned.

        Raises:
            KitNotFound
            ComponentNotFound
        """
        if not bOsMediaRequired:
            # As a placeholder, use the same OS as the installer

            # Find installer node entry
            node = self._node_db_api.getNode(session,
                                             ConfigManager().getInstaller(),
                                             {'softwareprofile': True})

            return node.getSoftwareProfile().getOsInfo()

        # Use available operating system kit; raise exception if
        # multiple available
        os_kits = self._kit_db_api.getKitList(session, os_kits_only=True)
        if not os_kits:
            raise KitNotFound('No operating system kit installed')

        if len(os_kits) > 1:
            raise KitNotFound(
                'Multiple OS kits defined; use --os option to specify'
                ' operating system')

        kit = self._kit_db_api.getKit(session, os_kits[0].getName(),
                                      os_kits[0].getVersion(), '0')

        components = kit.getComponentList()

        if not components:
            raise ComponentNotFound('Malformed operating system kit [%s]' % (
                os_kits))

        osinfo_list = components[0].getOsInfoList()
        if len(osinfo_list) > 1:
            raise ComponentNotFound(
                'Multiple operating system components for kit [%s];'
                ' use --os argument to specify operating system' % (
                    os_kits[0]))

        return osinfo_list[0]

    def createSoftwareProfile(self, session: Session, swProfileSpec,
                              settingsDict=None):
        """
        Create a new software profile from a specification, optionally
        resolving the OS, auto-adding the OS and 'core' components, and
        enabling all requested components.

        settingsDict keys:
            bOsMediaRequired (default True)
            unmanagedProfile (default False)

            Exceptions:
                ConfigurationError
                NetworkNotFound
                ComponentNotFound
                KitNotFound
                OSError
        """
        if settingsDict == None:
            settingsDict = {}

        bOsMediaRequired = settingsDict.get('bOsMediaRequired', True)
        unmanagedProfile = settingsDict.get('unmanagedProfile', False)

        # Validate software profile name
        validation.validateProfileName(swProfileSpec.getName())

        # Insert default description for software profile
        if swProfileSpec.getDescription() is None:
            swProfileSpec.setDescription('%s Nodes' % (
                swProfileSpec.getName()))

        self._logger.debug('Creating software profile [%s]' % (
            swProfileSpec))

        osInfo = swProfileSpec.getOsInfo() \
            if swProfileSpec.getOsInfo() else self._getOsInfo(
                session, bOsMediaRequired)

        # If we're creating an unmanaged software profile (no
        # DHCP/PXE/kickstart/OS) just create it now and we're done
        if unmanagedProfile:
            self._sp_db_api.addSoftwareProfile(session, swProfileSpec)
        else:
            if bOsMediaRequired and swProfileSpec.getOsInfo():
                try:
                    self._kit_db_api.getKit(
                        session,
                        swProfileSpec.getOsInfo().getName(),
                        swProfileSpec.getOsInfo().getVersion(), '0')
                except KitNotFound:
                    self._logger.error('OS kit for [%s] not found' % (
                        swProfileSpec.getOsInfo()))

                    raise
            else:
                swProfileSpec.setOsInfo(osInfo)

            # Get component manager for appropriate OS family
            osConfig = osHelper.getOsInfo(osInfo.getName(),
                                          osInfo.getVersion(),
                                          osInfo.getArch())

            osObjFactory = osUtility.getOsObjectFactory(
                osConfig.getOsFamilyInfo().getName())

            # Need to be fancy with components
            spComponents = swProfileSpec.getComponents()
            swProfileSpec.setComponents(TortugaObjectList())

            bFoundOsComponent = False
            bFoundCoreComponent = False

            components = []

            # Iterate over components, adding them to the software profile
            for c in spComponents:
                cobj = self._component_db_api.getBestMatchComponent(
                    session, c.getName(), c.getVersion(), osInfo,
                    c.getKit().getId())

                k = cobj.getKit()

                if k.getIsOs():
                    # This component is a member of the OS kit, set the flag
                    bFoundOsComponent = True
                elif k.getName() == 'base' and c.getName() == 'core':
                    # Found the 'core' component in 'base' kit
                    bFoundCoreComponent = True

                components.append(cobj)

            # If the operating system is undefined for this software
            # profile, use the same OS as the installer.
            if bOsMediaRequired and not bFoundOsComponent:
                # Find OS component
                osCompName = '%s-%s-%s' % (
                    osInfo.getName(), osInfo.getVersion(),
                    osInfo.getArch())

                self._logger.debug('Automatically adding OS component [%s]'
                                   ' (not specified in template)' % (
                                       osCompName))

                try:
                    osComponent = self._component_db_api.getComponent(
                        session, osCompName, osInfo.getVersion(), osInfo,
                        {'kit': True})

                    components.append(osComponent)
                except ComponentNotFound:
                    # Cannot find OS component, don't freak out
                    pass

            # Ensure 'core' component is enabled
            if not bFoundCoreComponent:
                # Attempt to automatically add the core component, only
                # if one exists for this OS
                try:
                    comp = self._getCoreComponentForOsInfo(session, osInfo)

                    self._logger.debug('Automatically adding [core]'
                                       ' component'
                                       ' (not specified in template)')

                    components.append(comp)
                except ComponentNotFound:
                    self._logger.warning(
                        'OS [{}] does not have a compatible \'core\''
                        ' component'.format(osInfo))

            # Initialize values for kernel, kernelParams, and initrd
            if not swProfileSpec.getKernel():
                swProfileSpec.setKernel(
                    osObjFactory.getOsSysManager().getKernel(osInfo))

            if not swProfileSpec.getInitrd():
                swProfileSpec.setInitrd(
                    osObjFactory.getOsSysManager().getInitrd(osInfo))

            # Add the software profile
            self._sp_db_api.addSoftwareProfile(session, swProfileSpec)

            # Enable components in one fell swoop
            for comp in components:
                self._logger.debug('Enabling component [%s]' % (
                    comp.getName()))

                if comp.getKit().getIsOs():
                    # Don't use enableComponent() on OS kit
                    self._component_db_api.addComponentToSoftwareProfile(
                        session, comp.getId(), swProfileSpec.getId())

                    continue

                self.enableComponent(
                    session, swProfileSpec.getName(),
                    comp.getKit().getName(), comp.getKit().getVersion(),
                    comp.getKit().getIteration(), comp.getName(),
                    comp.getVersion())

        #
        # Fire the tags changed event for all creates that have tags
        #
        # Get the latest version from the db in case the create method
        # added some embellishments
        #
        swp = self.getSoftwareProfile(session, swProfileSpec.getName())
        if swp.getTags():
            SoftwareProfileTagsChanged.fire(
                softwareprofile_id=str(swp.getId()),
                softwareprofile_name=swp.getName(),
                tags=swp.getTags(),
                previous_tags={})

    def _getComponent(self, kit, compName, compVersion): \
            # pylint: disable=no-self-use
        """
        Find a component by exact name and version within a kit.

        Raises:
            ComponentNotFound
        """
        # Iterate over component list, looking for a match
        comp = None

        # for/else: raise only if no component matched (no 'break')
        for comp in kit.getComponentList():
            if comp.getName() == compName and \
                    comp.getVersion() == compVersion:
                break
        else:
            raise ComponentNotFound(
                "Component [%s-%s] not found in kit [%s]" % (
                    compName, compVersion, kit))

        return comp

    def _get_kit_by_component(self, session: Session, comp_name,
                              comp_version=None):
        """
        Gets a kit by component name/version.

        :param comp_name:    the name of the component
        :param comp_version: the version of the component

        :raises KitNotFound:
        :raises ComponentNotFound:
        """
        kit_list = self._kit_db_api.getKitList(session)
        kits = [
            kit
            for kit in kit_list
            for component in kit.getComponentList()
            if component.getName() == comp_name and
            (comp_version is None or
             component.getVersion() == comp_version)
        ]
        if not kits:
            raise KitNotFound(
                'Kit containing component [%s] not found' % (comp_name))

        if len(kits) > 1:
            raise ComponentNotFound(
                'Kit name must be specified, multiple kits contain '
                'component: {}'.format(comp_name))

        return kits[0]

    def enableComponent(self, session: Session,
                        software_profile_name: str, kit_name: str,
                        kit_version: str, kit_iteration: str,
                        comp_name: str,
                        comp_version: Optional[str] = None):
        """
        Enable a component on a software profile.

        :param software_profile_name: the name of the software profile
        :param kit_name:              the name of the kit
        :param kit_version:           the version of the kit
        :param kit_iteration:         the iteration of the kit
        :param comp_name:             the name of the component
        :param comp_version:          the version of the component

        :raises KitNotFound:
        :raises SoftwareProfileNotFound:
        :raises ComponentNotFound:
        """
        kit, comp_version = self._get_kit_and_component_version(
            session, kit_name, kit_version, kit_iteration, comp_name,
            comp_version)

        software_profile = self.getSoftwareProfile(
            session, software_profile_name, {'os': True})

        # OS kit components are data-only; regular kit components run
        # their installer's pre/post enable actions
        if kit.getIsOs():
            best_match_component = self._enable_os_kit_component(
                session, kit, comp_name, comp_version, software_profile)
        else:
            best_match_component = self._enable_kit_component(
                session, kit, comp_name, comp_version, software_profile)

        if not best_match_component:
            self._logger.info(
                'Component not enabled: {}'.format(comp_name))
        else:
            self._logger.info(
                'Enabled component on software profile: {} -> {}'.format(
                    best_match_component, software_profile))

    def _get_kit_and_component_version(self, session: Session, kit_name,
                                       kit_version, kit_iteration,
                                       comp_name, comp_version=None):
        """
        Gets a Kit instance and component version.

        :param kit_name:      the name of the kit
        :param kit_version:   the version of the kit
        :param kit_iteration: the iteration of the kit
        :param comp_name:     the component name
        :param comp_version:  the component version (optional)

        :return: a tuple, consisting of (Kit, component_version)

        :raises KitNotFound: when no (or multiple ambiguous) kits match
        """
        kit = None
        if kit_name is None:
            # No kit specified: locate it via the component
            kit = self._get_kit_by_component(session, comp_name,
                                             comp_version=comp_version)
            #
            # Get component version if required
            #
            if comp_version is None:
                for component in kit.getComponentList():
                    if component.getName() == comp_name:
                        comp_version = component.getVersion()
                        break
        elif kit_version is None or kit_iteration is None:
            # Partially-specified kit: scan and ensure a unique match
            kits_found = 0
            for k in self._kit_db_api.getKitList(session):
                if k.getName() == kit_name and \
                        (kit_version is None or
                         k.getVersion() == kit_version) and \
                        (kit_iteration is None or
                         k.getIteration() == kit_iteration):
                    kit = k
                    kits_found += 1

            if kits_found > 1:
                if kit_version is not None:
                    raise KitNotFound('Multiple kits found: {}-{}'.format(
                        kit_name, kit_version))
                else:
                    raise KitNotFound(
                        'Multiple kits found {}'.format(kit_name))
        else:
            kit = self._kit_db_api.getKit(session, kit_name, kit_version,
                                          kit_iteration)

        if kit is None:
            raise KitNotFound('Kit [%s] not found' % (
                Kit(kit_name, kit_version, kit_iteration)))

        return kit, comp_version

    def _enable_kit_component(self, session: Session, kit, comp_name,
                              comp_version, software_profile):
        """
        Enables a regular kit component on a specific software profile.

        :param kit:              the Kit instance, whose component is
                                 being enabled
        :param comp_name:        the name of the component to enable
        :param comp_version:     the version of the component to enable
        :param software_profile: the software profile on which the
                                 component will be enabled

        :return: the Component instance that was enabled, or None when
                 the component installer reports it is not enableable
        """
        kit_spec = (kit.getName(), kit.getVersion(), kit.getIteration())
        installer = get_kit_installer(kit_spec)()
        installer.session = session
        comp_installer = installer.get_component_installer(comp_name)
        if comp_installer is None:
            raise ComponentNotFound(
                'Component [%s] not found in kit [%s]' % (comp_name, kit))

        if not comp_installer.is_enableable(software_profile):
            self._logger.warning(
                'Component cannot be enabled: {}'.format(
                    comp_installer.spec))
            return None

        # Installer hooks bracket the DB update
        comp_installer.run_action('pre_enable',
                                  software_profile.getName())

        best_match_component = self._add_component_to_software_profile(
            session, kit, comp_name, comp_version, software_profile)

        comp_installer.run_action('enable', software_profile.getName())
        comp_installer.run_action('post_enable',
                                  software_profile.getName())

        return best_match_component

    def _enable_os_kit_component(self, session: Session, kit, comp_name,
                                 comp_version, software_profile):
        """
        Enables an OS kit component on a specific software profile.

        :param kit:              the OS Kit instance, whose component is
                                 being enabled
        :param comp_name:        the name of the component to enable
        :param comp_version:     the version of the component to enable
        :param software_profile: the software profile on which the
                                 component will be enabled

        :return: the Component instance that was enabled
        """
        return self._add_component_to_software_profile(
            session, kit, comp_name, comp_version, software_profile)

    def _add_component_to_software_profile(self, session: Session, kit,
                                           comp_name, comp_version,
                                           software_profile):
        """
        Adds a kit component to a software profile. This is a data-only
        operation, as no pre/post enable actions are called.

        :param kit:              the Kit instance, whose component is
                                 being added
        :param comp_name:        the name of the component to add
        :param comp_version:     the version of the component to add
        :param software_profile: the software profile to which the
                                 component will be added

        :return: the Component instance that was added
        """
        best_match_component = \
            self._component_db_api.getBestMatchComponent(
                session, comp_name, comp_version,
                software_profile.getOsInfo(), kit.getId())

        self._component_db_api.addComponentToSoftwareProfile(
            session, best_match_component.getId(),
            software_profile.getId())

        return best_match_component

    def disableComponent(self, session: Session, software_profile_name,
                         kit_name, kit_version, kit_iteration, comp_name,
                         comp_version=None): \
            # pylint: disable=unused-argument
        """
        Disables a component on a software profile.

        :param software_profile_name: the name of the software profile
        :param kit_name:              the name of the kit
        :param kit_version:           the version of the kit
        :param kit_iteration:         the iteration of the kit
        :param comp_name:             the name of the component
        :param comp_version:          the version of the component

        :raises KitNotFound:
        :raises SoftwareProfileNotFound:
        :raises ComponentNotFound:
        """
        # NOTE: comp_version is intentionally not forwarded here (see
        # pylint disable above); the version is resolved from the kit
        kit, comp_version = self._get_kit_and_component_version(
            session, kit_name, kit_version, kit_iteration, comp_name)

        software_profile = self.getSoftwareProfile(
            session, software_profile_name, {'os': True})

        if kit.getIsOs():
            best_match_component = self._disable_os_kit_component(
                session, kit, comp_name, comp_version, software_profile)
        else:
            best_match_component = self._disable_kit_component(
                session, kit, comp_name, comp_version, software_profile)

        self._logger.info(
            'Disabled component on software profile: {} -> {}'.format(
                best_match_component, software_profile))

    def _disable_kit_component(self, session, kit, comp_name,
                               comp_version, software_profile):
        """
        Disables a regular kit component on a specific software profile.

        :param kit:              the Kit instance, whose component is
                                 being disabled
        :param comp_name:        the name of the component to disable
        :param comp_version:     the version of the component to disable
        :param software_profile: the software profile on which the
                                 component will be disable

        :return: the Component instance that was disabled
        """
        kit_spec = (kit.getName(), kit.getVersion(), kit.getIteration())
        installer = get_kit_installer(kit_spec)()
        installer.session = session
        comp_installer = installer.get_component_installer(comp_name)
        if comp_installer is None:
            raise ComponentNotFound(
                'Component [%s] not found in kit [%s]' % (comp_name, kit))

        # Installer hooks bracket the DB removal
        comp_installer.run_action('pre_disable',
                                  software_profile.getName())
        comp_installer.run_action('disable', software_profile.getName())
        best_match_component = \
            self._remove_component_from_software_profile(
                session, kit, comp_name, comp_version, software_profile)
        comp_installer.run_action('post_disable',
                                  software_profile.getName())

        return best_match_component

    def _disable_os_kit_component(self, session, kit, comp_name,
                                  comp_version, software_profile):
        """
        Disables an OS kit component on a specific software profile
        (data-only; no installer actions).

        :param kit:              the OS Kit instance, whose component is
                                 being disabled
        :param comp_name:        the name of the component to disable
        :param comp_version:     the version of the component to disable
        :param software_profile: the software profile on which the
                                 component will be disabled

        :return: the Component instance that was disabled
        """
        return self._remove_component_from_software_profile(
            session, kit, comp_name, comp_version, software_profile)

    def _remove_component_from_software_profile(self, session: Session,
                                                kit, comp_name,
                                                comp_version,
                                                software_profile):
        """
        Removes a kit component from a software profile. This is a
        data-only operation, as no pre/post disable actions are called.

        :param kit:              the Kit instance, whose component is
                                 being removed
        :param comp_name:        the name of the component to remove
        :param comp_version:     the version of the component to remove
        :param software_profile: the software profile from which the
                                 component will be removed

        :return: the Component instance that was removed
        """
        best_match_component = \
            self._component_db_api.getBestMatchComponent(
                session, comp_name, comp_version,
                software_profile.getOsInfo(), kit.getId())

        self._component_db_api.deleteComponentFromSoftwareProfile(
            session, best_match_component.getId(),
            software_profile.getId())

        return best_match_component

    def deleteSoftwareProfile(self, session: Session, name):
        """
        Delete software profile by name

        Raises:
            SoftwareProfileNotFound
        """
        self._sp_db_api.deleteSoftwareProfile(session, name)

        # Remove all flags for software profile
        swProfileFlagPath = os.path.join(
            self._config_manager.getRoot(),
            'var/run/actions/%s' % (name))
        if os.path.exists(swProfileFlagPath):
            shutil.rmtree(swProfileFlagPath)

        self._logger.info('Deleted software profile [%s]' % (name))

    def getNodeList(self, session: Session, softwareProfileName):
        """Return the nodes associated with a software profile."""
        return self._sp_db_api.getNodeList(session, softwareProfileName)

    def getEnabledComponentList(self, session: Session, name):
        """ Get the list of enabled components """
        return self._sp_db_api.getEnabledComponentList(session, name)

    def getPartitionList(self, session: Session, softwareProfileName):
        """ Get list of partitions. """
        return self._sp_db_api.getPartitionList(session,
                                                softwareProfileName)

    def addUsableHardwareProfileToSoftwareProfile(
            self, session: Session, hardwareProfileName: str,
            softwareProfileName: str) -> None:
        """
        Map software profile to hardware profile
        """
        self._logger.info(
            'Mapping hardware profile [%s] to software profile [%s]',
            hardwareProfileName, softwareProfileName)

        self._sp_db_api.addUsableHardwareProfileToSoftwareProfile(
            session, hardwareProfileName, softwareProfileName)

    def deleteUsableHardwareProfileFromSoftwareProfile(
            self, session: Session, hardwareProfileName,
            softwareProfileName):
        """Remove a hardware-profile/software-profile mapping."""
        return self._sp_db_api.\
            deleteUsableHardwareProfileFromSoftwareProfile(
                session, hardwareProfileName, softwareProfileName)

    def copySoftwareProfile(self, session: Session,
                            srcSoftwareProfileName,
                            dstSoftwareProfileName):
        """
        Duplicate an existing software profile under a new (validated)
        name, firing a tags-changed event if the copy carries tags.
        """
        validation.validateProfileName(dstSoftwareProfileName)

        self._logger.info('Copying software profile [%s] to [%s]',
                          srcSoftwareProfileName, dstSoftwareProfileName)

        self._sp_db_api.copySoftwareProfile(session,
                                            srcSoftwareProfileName,
                                            dstSoftwareProfileName)

        #
        # Fire the tags changed event for all copies that have tags
        #
        swp = self.getSoftwareProfile(session, dstSoftwareProfileName)
        if swp.getTags():
            SoftwareProfileTagsChanged.fire(
                softwareprofile_id=str(swp.getId()),
                softwareprofile_name=swp.getName(),
                tags=swp.getTags(),
                previous_tags={})

    def getUsableNodes(self, session: Session, softwareProfileName):
        """Return nodes that can use this software profile."""
        return self._sp_db_api.getUsableNodes(session,
                                              softwareProfileName)

    def get_software_profile_metadata(self, session: Session,
                                      name: str) -> Dict[str, str]:
        """
        Call action_get_metadata() method for all kits

        :param name: the software profile name to collect metadata for
        :return: merged metadata dict from all non-OS kit installers
        """
        self._logger.debug(
            'Retrieving metadata for software profile [%s]', name)

        metadata: Dict[str, str] = {}

        for kit in self._kit_db_api.getKitList(session):
            if kit.getIsOs():
                # ignore OS kits
                continue

            kit_installer = get_kit_installer(
                (kit.getName(), kit.getVersion(), kit.getIteration()))()
            kit_installer.session = session

            # we are only interested in software profile metadata
            item = kit_installer.action_get_metadata(
                software_profile_name=name)

            if item:
                metadata.update(item)

        return metadata
class TortugaCli(metaclass=ABCMeta):
    """
    Base tortuga command line interface class.

    Subclasses implement runCommand(); this base class provides common
    option parsing (URL / credentials / debug level), logging setup, and
    uniform exception handling via run().
    """

    def __init__(self, validArgCount=0):
        self._logger = logging.getLogger(CLI_NAMESPACE)
        self._parser = argparse.ArgumentParser()
        self._args = []
        self._validArgCount = validArgCount
        # Web-service connection settings, filled in by parseArgs()
        self._url = None
        self._username = None
        self._password = None
        self._verify = True
        self._optionGroupDict = {}
        self._cm = ConfigManager()

        self.__initializeLocale()

    def __initializeLocale(self):
        """Initialize the gettext domain """
        langdomain = 'tortugaStrings'

        # Locate the Internationalization stuff
        localedir = '../share/locale' \
            if os.path.exists('../share/locale') else \
            os.path.join(self._cm.getRoot(), 'share/locale')

        gettext.install(langdomain, localedir)

    def getParser(self):
        """ Get parser for this class. """
        return self._parser

    def addOption(self, *args, **kwargs):
        """ Add option. """
        self._parser.add_argument(*args, **kwargs)

    def addOptionToGroup(self, groupName, *args, **kwargs):
        """
        Add option for the given group name.
        Group should be created using addOptionGroup().
        """
        group = self._optionGroupDict.get(groupName)
        group.add_argument(*args, **kwargs)

    def addOptionGroup(self, groupName, desc):
        """ Add option group. """
        group = self._parser.add_argument_group(groupName, desc)
        self._optionGroupDict[groupName] = group
        return group

    def parseArgs(self, usage=None):
        """
        Parse args

        Registers the common Tortuga options, parses sys.argv, handles
        -V (print version and exit), configures logging, and resolves
        the web service connection settings.

        Raises:
            InvalidArgument
        """
        common_group = _('Common Tortuga Options')
        self.addOptionGroup(common_group, None)

        self.addOptionToGroup(
            common_group, '-V', action='store_true', dest='cmdVersion',
            default=False, help=_('print version and exit'))

        self.addOptionToGroup(
            common_group, '-d', '--debug', dest='consoleLogLevel',
            default='warning',
            help=_('set debug level; valid values are: '
                   'critical, error, warning, info, debug'))

        self.addOptionToGroup(
            common_group, '--url', help=_('Tortuga web service URL'))

        self.addOptionToGroup(
            common_group, '--username', dest='username',
            help=_('Tortuga web service user name'))

        self.addOptionToGroup(
            common_group, '--password', dest='password',
            help=_('Tortuga web service password'))

        self.addOptionToGroup(
            common_group, '--no-verify', dest='verify',
            action='store_false', default=True,
            help=_("Don't verify the API SSL certificate"))

        if usage:
            self._parser.description = usage

        try:
            self._args = self._parser.parse_args()
        except SystemExit as rc:
            # Flush output before propagating argparse's exit code
            sys.stdout.flush()
            sys.stderr.flush()
            sys.exit(int(str(rc)))

        if self._args.cmdVersion:
            print(
                _('{0} version: {1}'.format(
                    os.path.basename(sys.argv[0]),
                    self._cm.getTortugaRelease())))

            sys.exit(0)

        self._setup_logging(self._args.consoleLogLevel)

        self._url, self._username, self._password, self._verify = \
            self._get_web_service_options()

        return self._args

    def _setup_logging(self, log_level_name: str):
        """
        Setup logging for the specified log level.

        :param str log_level_name: the name of the log level to use
        """
        log_level_name = log_level_name.upper()
        if log_level_name not in [
                'CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']:
            print('Invalid debug level: {}'.format(log_level_name))
            sys.exit(0)

        log_level = getattr(logging, log_level_name)

        # Attach a console handler to the root Tortuga logger
        logger = logging.getLogger(ROOT_NAMESPACE)
        logger.setLevel(log_level)

        ch = logging.StreamHandler()
        ch.setLevel(log_level)

        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)

        logger.addHandler(ch)

    def _get_web_service_options(self):
        """
        Read Tortuga web service credentials from config file,
        environment, or command-line. Command-line overrides either
        config file or environment.

        :return: tuple of (url, username, password, verify)
        """
        username = password = url = None

        # Lowest precedence: ~/.local/tortuga/credentials config file
        cfg_file = os.path.join(os.path.expanduser('~'),
                                '.local', 'tortuga', 'credentials')

        if os.path.exists(cfg_file):
            cfg = configparser.ConfigParser()
            cfg.read(cfg_file)

            username = cfg.get('default', 'username') \
                if cfg.has_section('default') and \
                cfg.has_option('default', 'username') else None

            password = cfg.get('default', 'password') \
                if cfg.has_section('default') and \
                cfg.has_option('default', 'password') else None

            url = cfg.get('default', 'url') \
                if cfg.has_section('default') and \
                cfg.has_option('default', 'url') else None

        # TORTUGA_WS_URL
        if self._args.url:
            # Command-line "--server" argument overrides env var and
            # setting contained within '/etc/profile.nii'
            url = self._args.url
        elif os.getenv('TORTUGA_WS_URL'):
            url = os.getenv('TORTUGA_WS_URL')

        # TORTUGA_WS_USERNAME
        if self._args.username:
            username = self._args.username
        elif os.getenv('TORTUGA_WS_USERNAME'):
            username = os.getenv('TORTUGA_WS_USERNAME')

        # TORTUGA_WS_PASSWORD
        if self._args.password:
            password = self._args.password
        elif os.getenv('TORTUGA_WS_PASSWORD'):
            password = os.getenv('TORTUGA_WS_PASSWORD')

        #
        # CLI arguments should override the environment variable
        #
        if os.getenv('TORTUGA_WS_NO_VERIFY'):
            verify = False
        else:
            verify = self._args.verify

        return url, username, password, verify

    def usage(self, s=None):
        """
        Print usage information
        """
        if s:
            sys.stderr.write(_('Error: {0}').format(s) + '\n')

        self._parser.print_help()

        sys.exit(1)

    def getArgs(self):
        '''Returns the command line argument list'''
        return self._args

    def getUrl(self):
        """Get web service URL resolved by parseArgs()."""
        return self._url

    def getUsername(self):
        """ Get user name. """
        return self._username

    def getPassword(self):
        """ Get password. """
        return self._password

    @abstractmethod
    def runCommand(self): \
            # pylint: disable=no-self-use
        """
        This method must be implemented by the derived class.
        """

    def run(self):
        """
        Invoke runCommand() in derivative class and handle exceptions.
        """
        try:
            self.runCommand()
        except TortugaException as ex:
            print(ex.getErrorMessage())
            raise SystemExit(ex.getErrorCode())
        except SystemExit:
            raise
        except Exception as ex:
            print(str(ex))
            raise SystemExit(-1)

    def _parseDiskSize(self, diskSizeParam): \
            # pylint: disable=no-self-use
        """
        Parses diskSizeParam, returns an int value representing number
        of megabytes. Accepts 'TB', 'GB', 'MB' suffixes; a bare value
        is taken as megabytes.

        Raises:
            ValueError
        """
        if diskSizeParam.endswith('TB'):
            return int(float(diskSizeParam[:-2]) * 1000000)

        if diskSizeParam.endswith('GB'):
            return int(float(diskSizeParam[:-2]) * 1000)
        elif diskSizeParam.endswith('MB'):
            # Must be an integer
            return int(diskSizeParam[:-2])

        return int(diskSizeParam)

    def _getDiskSizeDisplayStr(self, volSize): \
            # pylint: disable=no-self-use
        """Format a size in megabytes as 'MB', 'GB', or 'TB' string."""
        if volSize < 1000:
            result = '%s MB' % (volSize)
        elif volSize < 1000000:
            result = '%.3f GB' % (float(volSize) / 1000)
        else:
            result = '%.3f TB' % (float(volSize) / 1000000)

        return result
class OsBootHostManagerCommon(OsObjectManager):
    """Methods for manipulating PXE files"""

    def __init__(self):
        OsObjectManager.__init__(self)

        # Cache this for later; fall back to the invoking user's passwd
        # entry when no 'apache' account exists
        try:
            self.passdata = pwd.getpwnam('apache')
        except KeyError:
            self.passdata = pwd.getpwnam(os.getenv('USER'))

        self.hardwareProfileDbApi = HardwareProfileDbApi()
        self.softwareProfileDbApi = SoftwareProfileDbApi()
        self._nodeApi = nodeApi.NodeApi()
        self._cm = ConfigManager()

    def __getActualSoftwareProfile(self, node, softwareProfileName):
        """
        Determine the actual software profile for 'node'.  An idle node
        falls back to the hardware profile's idle software profile when no
        name is given; an active node falls back to its current software
        profile.
        """
        self.getLogger().debug('__getActualSoftwareProfile(): node=[%s],'
                               ' softwareProfileName=[%s]' % (
                                   node.name, softwareProfileName))

        softwareProfile = None

        if node.isIdle:
            # Use the software profile that was passed in if there is one,
            if softwareProfileName:
                softwareProfile = self.softwareProfileDbApi.\
                    getSoftwareProfile(softwareProfileName, {'os': True})
            else:
                # ELSE use the default idle software profile
                hardwareProfile = node.getHardwareProfile()

                idleSoftwareProfileId = hardwareProfile.\
                    getIdleSoftwareProfileId()

                if idleSoftwareProfileId:
                    softwareProfile = self.softwareProfileDbApi.\
                        getSoftwareProfileById(idleSoftwareProfileId,
                                               {'os': True})
        else:
            # Use active software profile
            if softwareProfileName is None:
                softwareProfile = node.getSoftwareProfile()
            else:
                softwareProfile = self.softwareProfileDbApi.\
                    getSoftwareProfile(softwareProfileName, {'os': True})

        return softwareProfile

    def deletePuppetNodeCert(self, nodeName):
        # Remove the Puppet certificate when the node is reinstalled

        self.getLogger().debug('deletePuppetNodeCert(node=[%s])' % (
            nodeName))

        puppetSslDir = '/etc/puppetlabs/puppet/ssl'
        puppetReportDir = '/var/lib/puppet/reports'
        puppetYamlDir = '/var/lib/puppet/yaml'

        # All per-node Puppet artifacts: keys, signed cert, cached YAML
        filesToRemove = [
            os.path.join(puppetSslDir, 'public_keys/%s.pem' % (nodeName)),
            os.path.join(puppetSslDir, 'ca/signed/%s.pem' % (nodeName)),
            os.path.join(puppetSslDir, 'private_keys/%s.pem' % (nodeName)),
            os.path.join(puppetSslDir, 'certs/%s.pem' % (nodeName)),
            os.path.join(puppetYamlDir, 'node/%s.yaml' % (nodeName)),
            os.path.join(puppetYamlDir, 'facts/%s.yaml' % (nodeName)),
        ]

        for fn in filesToRemove:
            try:
                os.unlink(fn)
            except OSError as exc:
                # errno 2 == ENOENT; a missing file is not an error here
                if exc.errno != 2:
                    self.getLogger().error(
                        'Error attempting to remove %s (reason: %s)' % (
                            fn, exc))

        # Remove the per-node report directory as well
        fn = os.path.join(puppetReportDir, nodeName)
        try:
            shutil.rmtree(fn)
        except OSError as exc:
            # errno 2 == ENOENT; a missing directory is not an error here
            if exc.errno != 2:
                self.getLogger().error(
                    'Error attempting to remove %s (reason: %s)' % (
                        fn, exc))

    def nodeCleanup(self, nodeName):
        """
        Remove files related to the node
        """

        # Remove 'private' directory
        private_dir = os.path.join(self._cm.getRoot(), 'private', nodeName)

        if os.path.exists(private_dir):
            shutil.rmtree(private_dir)

    def addDhcpLease(self, node, nic):
        # Add DHCP lease to DHCP server
        pass

    def removeDhcpLease(self, nodeName):
        # Remove the DHCP lease from the DHCP server. This will be
        # a no-op on any platform that doesn't support the operation
        # (ie. any platform not running ISC DHCPD)
        pass

    def setNodeForNetworkBoot(self, dbNode):
        # Update node status to "Expired" and boot from network
        dbNode.state = 'Expired'
        dbNode.bootFrom = 0

        # Reinstallation requires a fresh Puppet certificate
        self.deletePuppetNodeCert(dbNode.name)

        # Write the updated file
        self.writePXEFile(dbNode)
class NodeManager(TortugaObjectManager): \
        # pylint: disable=too-many-public-methods

    def __init__(self):
        super(NodeManager, self).__init__()

        self._nodeDbApi = NodeDbApi()
        self._hardwareProfileDbApi = HardwareProfileDbApi()
        self._cm = ConfigManager()
        self._san = san.San()

    def __validateHostName(self, hostname: str, name_format: str) -> NoReturn:
        """
        Validate that a host name was (or was not) supplied, consistent
        with the hardware profile's name format.

        Raises:
            ConfigurationError
        """
        # A name format of '*' means the caller must supply host names
        bWildcardNameFormat = (name_format == '*')

        if hostname and not bWildcardNameFormat:
            # Host name specified, but hardware profile does not
            # allow setting the host name
            raise ConfigurationError(
                'Hardware profile does not allow setting host names'
                ' of imported nodes')
        elif not hostname and bWildcardNameFormat:
            # Host name not specified but hardware profile expects it
            raise ConfigurationError(
                'Hardware profile requires host names to be set')

    def createNewNode(self, session: Session, addNodeRequest: dict,
                      dbHardwareProfile: HardwareProfiles,
                      dbSoftwareProfile: Optional[SoftwareProfiles] = None,
                      validateIp: bool = True, bGenerateIp: bool = True,
                      dns_zone: Optional[str] = None) -> Nodes:
        """
        Convert the addNodeRequest into a Nodes object

        Raises:
            NicNotFound
        """
        self.getLogger().debug(
            'createNewNode(): session=[%s], addNodeRequest=[%s],'
            ' dbHardwareProfile=[%s], dbSoftwareProfile=[%s],'
            ' validateIp=[%s], bGenerateIp=[%s]' % (
                id(session), addNodeRequest, dbHardwareProfile.name,
                dbSoftwareProfile.name
                if dbSoftwareProfile else '(none)',
                validateIp, bGenerateIp))

        # This is where the Nodes() object is first created.
        node = Nodes()

        # Set the default node state
        node.state = 'Discovered'

        if 'rack' in addNodeRequest:
            node.rack = addNodeRequest['rack']

        node.addHostSession = addNodeRequest['addHostSession']

        hostname = addNodeRequest['name'] \
            if 'name' in addNodeRequest else None

        # Ensure no conflicting options (ie. specifying host name for
        # hardware profile in which host names are generated)
        self.__validateHostName(hostname, dbHardwareProfile.nameFormat)

        node.name = hostname

        # Complete initialization of new node record
        nic_defs = addNodeRequest['nics'] \
            if 'nics' in addNodeRequest else []

        AddHostServerLocal().initializeNode(session, node,
                                            dbHardwareProfile,
                                            dbSoftwareProfile, nic_defs,
                                            bValidateIp=validateIp,
                                            bGenerateIp=bGenerateIp,
                                            dns_zone=dns_zone)

        # Set hardware profile of new node
        node.hardwareProfileId = dbHardwareProfile.id

        # Set software profile of new node; if the software profile is None,
        # attempt to set the software profile to the idle software profile
        # of the associated hardware profile. This may also be None, in
        # which case the software profile is undefined.
        node.softwareprofile = dbSoftwareProfile \
            if dbSoftwareProfile else dbHardwareProfile.idlesoftwareprofile

        node.isIdle = dbSoftwareProfile.isIdle \
            if dbSoftwareProfile else True

        # Return the new node
        return node

    def getNode(self, name, optionDict=None):
        """Get node by name"""
        # Copy so the caller's dict is not mutated
        optionDict_ = optionDict.copy() if optionDict else {}

        optionDict_.update({'hardwareprofile': True})

        node = self._nodeDbApi.getNode(name, optionDict_)

        hwprofile = self._hardwareProfileDbApi.getHardwareProfile(
            node.getHardwareProfile().getName(), {'resourceadapter': True})

        adapter_name = hwprofile.getResourceAdapter().getName() \
            if hwprofile.getResourceAdapter() else 'default'

        # Query vcpus from resource adapter
        ResourceAdapterClass = resourceAdapterFactory.getResourceAdapterClass(
            adapter_name)

        # Update Node object
        node.setVcpus(ResourceAdapterClass().get_node_vcpus(node.getName()))

        return node

    def getNodeById(self, nodeId, optionDict=None):
        """
        Get node by node id

        Raises:
            NodeNotFound
        """
        return self._nodeDbApi.getNodeById(int(nodeId), optionDict)

    def getNodeByIp(self, ip):
        """
        Get node by IP address

        Raises:
            NodeNotFound
        """
        return self._nodeDbApi.getNodeByIp(ip)

    def getNodeList(self, tags=None):
        """Return all nodes"""
        return self._nodeDbApi.getNodeList(tags=tags)

    def updateNode(self, nodeName, updateNodeRequest):
        """Apply 'updateNodeRequest' changes (nics, state) to named node."""
        self.getLogger().debug('updateNode(): name=[{0}]'.format(nodeName))

        session = DbManager().openSession()

        try:
            node = NodesDbHandler().getNode(session, nodeName)

            if 'nics' in updateNodeRequest:
                # Only the first nic entry is applied
                nic = updateNodeRequest['nics'][0]

                if 'ip' in nic:
                    node.nics[0].ip = nic['ip']
                    node.nics[0].boot = True

            # Call resource adapter
            NodesDbHandler().updateNode(session, node, updateNodeRequest)

            run_post_install = False

            if 'state' in updateNodeRequest:
                # Post-install work runs only on the 'Allocated' ->
                # 'Provisioned' transition
                run_post_install = node.state == 'Allocated' and \
                    updateNodeRequest['state'] == 'Provisioned'

                node.state = updateNodeRequest['state']

            session.commit()

            if run_post_install:
                self.getLogger().debug(
                    'updateNode(): run-post-install for node [{0}]'.format(
                        node.name))

                self.__scheduleUpdate()
        except Exception:
            session.rollback()

            # NOTE(review): failure is logged but not re-raised, so callers
            # cannot tell the update failed -- confirm this is intentional
            self.getLogger().exception(
                'Exception updating node [{0}]'.format(nodeName))
        finally:
            DbManager().closeSession()

    def updateNodeStatus(self, nodeName, state=None, bootFrom=None):
        """Update node status

        If both 'state' and 'bootFrom' are None, this operation will
        update only the 'lastUpdated' timestamp.

        Returns:
            bool indicating whether state and/or bootFrom differed from
            current value
        """
        # Human-readable bootFrom value for the debug log only
        value = 'None' if bootFrom is None else \
            '1 (disk)' if int(bootFrom) == 1 else '0 (network)'

        self.getLogger().debug(
            'updateNodeStatus(): node=[%s], state=[%s], bootFrom=[%s]' % (
                nodeName, state, value))

        session = DbManager().openSession()

        try:
            node = NodesDbHandler().getNode(session, nodeName)

            result = self._updateNodeStatus(node, state=state,
                                            bootFrom=bootFrom)

            session.commit()

            return result
        finally:
            DbManager().closeSession()

    def _updateNodeStatus(self, dbNode, state=None, bootFrom=None):
        """
        Internal method which takes a 'Nodes' object instead of a
        node name.
        """
        result = NodesDbHandler().updateNodeStatus(dbNode, state, bootFrom)

        # Only change local boot configuration if the hardware profile is
        # not marked as 'remote' and we're not acting on the installer node.
        if dbNode.softwareprofile and \
                dbNode.softwareprofile.type != 'installer' and \
                dbNode.hardwareprofile.location not in \
                ('remote', 'remote-vpn'):
            osUtility.getOsObjectFactory().getOsBootHostManager().\
                writePXEFile(dbNode, localboot=bootFrom)

        return result

    def __process_nodeErrorDict(self, nodeErrorDict):
        # Convert {key: [Nodes, ...]} into {key: [name, ...]} and collect
        # a summary dict for every node under the 'NodesDeleted' key
        result = {}
        nodes_deleted = []

        for key, nodeList in nodeErrorDict.items():
            result[key] = [dbNode.name for dbNode in nodeList]

            if key == 'NodesDeleted':
                for node in nodeList:
                    node_deleted = {
                        'name': node.name,
                        'hardwareprofile': node.hardwareprofile.name,
                        'addHostSession': node.addHostSession,
                    }

                    # 'softwareprofile' key is present only when the node
                    # had an associated software profile
                    if node.softwareprofile:
                        node_deleted['softwareprofile'] = \
                            node.softwareprofile.name

                    nodes_deleted.append(node_deleted)

        return result, nodes_deleted

    def deleteNode(self, nodespec):
        """
        Delete node by nodespec

        Raises:
            NodeNotFound
        """
        # Short host name of the installer; it must never be deleted
        installer_hostname = socket.getfqdn().split('.', 1)[0]

        session = DbManager().openSession()

        try:
            nodes = []

            for node in self.__expand_nodespec(session, nodespec):
                if node.name.split('.', 1)[0] == installer_hostname:
                    self.getLogger().info(
                        'Ignoring request to delete installer node'
                        ' ([{0}])'.format(node.name))

                    continue

                nodes.append(node)

            if not nodes:
                raise NodeNotFound('No nodes matching nodespec [%s]' % (
                    nodespec))

            # Run kit pre-delete actions before touching the database
            self.__preDeleteHost(nodes)

            nodeErrorDict = NodesDbHandler().deleteNode(session, nodes)

            # REALLY!?!? Convert a list of Nodes objects into a list of
            # node names so we can report the list back to the end-user.
            # This needs to be FIXED!
            result, nodes_deleted = self.__process_nodeErrorDict(
                nodeErrorDict)

            session.commit()

            # ============================================================
            # Perform actions *after* node deletion(s) have been committed
            # to database.
            # ============================================================
            self.__postDeleteHost(nodes_deleted)

            # Collect the unique add-host sessions of the deleted nodes so
            # their bookkeeping can be removed in one call
            addHostSessions = set(
                [tmpnode['addHostSession'] for tmpnode in nodes_deleted])

            if addHostSessions:
                AddHostManager().delete_sessions(addHostSessions)

            bhm = osUtility.getOsObjectFactory().getOsBootHostManager()

            for nodeName in result['NodesDeleted']:
                # Remove the Puppet cert
                bhm.deletePuppetNodeCert(nodeName)

                bhm.nodeCleanup(nodeName)

                self.getLogger().info('Node [%s] deleted' % (nodeName))

            # Schedule a cluster update
            self.__scheduleUpdate()

            return result
        except TortugaException:
            session.rollback()

            raise
        except Exception:
            session.rollback()

            self.getLogger().exception(
                'Exception in NodeManager.deleteNode()')

            raise
        finally:
            DbManager().closeSession()

    def __process_delete_node_result(self, nodeErrorDict):
        # REALLY!?!? Convert a list of Nodes objects into a list of
        # node names so we can report the list back to the end-user.
        # This needs to be FIXED!

        result = {}
        nodes_deleted = []

        for key, nodeList in nodeErrorDict.items():
            result[key] = [dbNode.name for dbNode in nodeList]

            if key == 'NodesDeleted':
                for node in nodeList:
                    node_deleted = {
                        'name': node.name,
                        'hardwareprofile': node.hardwareprofile.name,
                    }

                    if node.softwareprofile:
                        node_deleted['softwareprofile'] = \
                            node.softwareprofile.name

                    nodes_deleted.append(node_deleted)

        return result, nodes_deleted

    def __preDeleteHost(self, nodes):
        # Run kit 'pre_delete_host' actions for each node about to be
        # deleted
        self.getLogger().debug('__preDeleteHost(): nodes=[%s]' % (
            ' '.join([node.name for node in nodes])))

        if not nodes:
            self.getLogger().debug('No nodes deleted in this operation')

            return

        kitmgr = KitActionsManager()

        for node in nodes:
            kitmgr.pre_delete_host(
                node.hardwareprofile.name,
                node.softwareprofile.name if node.softwareprofile else None,
                nodes=[node.name])

    def __postDeleteHost(self, nodes_deleted):
        # 'nodes_deleted' is a list of dicts of the following format:
        #
        # {
        #     'name': 'compute-01',
        #     'softwareprofile': 'Compute',
        #     'hardwareprofile': 'LocalIron',
        # }
        #
        # if the node does not have an associated software profile, the
        # dict does not contain the key 'softwareprofile'.

        self.getLogger().debug('__postDeleteHost(): nodes_deleted=[%s]' % (
            nodes_deleted))

        if not nodes_deleted:
            self.getLogger().debug('No nodes deleted in this operation')

            return

        kitmgr = KitActionsManager()

        for node_dict in nodes_deleted:
            kitmgr.post_delete_host(node_dict['hardwareprofile'],
                                    node_dict['softwareprofile']
                                    if 'softwareprofile' in node_dict
                                    else None,
                                    nodes=[node_dict['name']])

    def __scheduleUpdate(self):
        # Kick off the cluster update helper script
        tortugaSubprocess.executeCommand(
            os.path.join(self._cm.getRoot(), 'bin/schedule-update'))

    def getInstallerNode(self, optionDict=None):
        """Return the Nodes entry for the installer node itself."""
        return self._nodeDbApi.getNode(self._cm.getInstaller(),
                                       optionDict=optionDict)

    def getProvisioningInfo(self, nodeName):
        """Return provisioning information for the named node."""
        return self._nodeDbApi.getProvisioningInfo(nodeName)

    def getKickstartFile(self, node, hardwareprofile, softwareprofile):
        """
        Generate kickstart file for specified node

        Raises:
            OsNotSupported
        """
        osFamilyName = softwareprofile.os.family.name

        try:
            # OS support is provided by per-family plug-in modules
            osSupportModule = __import__('tortuga.os.%s.osSupport' % (
                osFamilyName), fromlist=['OSSupport'])
        except ImportError:
            raise OsNotSupported(
                'Operating system family [%s] not supported' % (
                    osFamilyName))

        OSSupport = osSupportModule.OSSupport

        tmpOsFamilyInfo = OsFamilyInfo(softwareprofile.os.family.name,
                                       softwareprofile.os.family.version,
                                       softwareprofile.os.family.arch)

        return OSSupport(tmpOsFamilyInfo).getKickstartFileContents(
            node, hardwareprofile, softwareprofile)

    def __transferNodeCommon(self, session, dbDstSoftwareProfile, results): \
            # pylint: disable=no-self-use
        # Aggregate list of transferred nodes based on hardware profile
        # to call resource adapter minimal number of times.
        hwProfileMap = {}

        for transferResultDict in results:
            dbNode = transferResultDict['node']
            dbHardwareProfile = dbNode.hardwareprofile

            if dbHardwareProfile not in hwProfileMap:
                hwProfileMap[dbHardwareProfile] = [transferResultDict]
            else:
                hwProfileMap[dbHardwareProfile].append(transferResultDict)

        session.commit()

        nodeTransferDict = {}

        # Kill two birds with one stone... do the resource adapter
        # action as well as populate the nodeTransferDict. This saves
        # having to iterate twice on the same result data.
        for dbHardwareProfile, nodesDict in hwProfileMap.items():
            adapter = resourceAdapterFactory.getApi(
                dbHardwareProfile.resourceadapter.name)

            dbNodeTuples = []

            for nodeDict in nodesDict:
                dbNode = nodeDict['node']
                dbSrcSoftwareProfile = nodeDict['prev_softwareprofile']

                # Record the node as 'removed' from its previous profile...
                if dbSrcSoftwareProfile.name not in nodeTransferDict:
                    nodeTransferDict[dbSrcSoftwareProfile.name] = {
                        'added': [],
                        'removed': [dbNode],
                    }
                else:
                    nodeTransferDict[dbSrcSoftwareProfile.name]['removed'].\
                        append(dbNode)

                # ...and as 'added' to the destination profile
                if dbDstSoftwareProfile.name not in nodeTransferDict:
                    nodeTransferDict[dbDstSoftwareProfile.name] = {
                        'added': [dbNode],
                        'removed': [],
                    }
                else:
                    nodeTransferDict[dbDstSoftwareProfile.name]['added'].\
                        append(dbNode)

                # The destination software profile is available through
                # node relationship.
                dbNodeTuples.append((dbNode, dbSrcSoftwareProfile))

            adapter.transferNode(dbNodeTuples, dbDstSoftwareProfile)

            session.commit()

        # Now call the 'refresh' action to all participatory components
        KitActionsManager().refresh(nodeTransferDict)

        return results

    def transferNode(self, nodespec, dstSoftwareProfileName, bForce=False):
        """
        Transfer nodes defined by 'nodespec' to 'dstSoftwareProfile'

        Raises:
            NodeNotFound
            SoftwareProfileNotFound
            NodeTransferNotValid
        """
        session = DbManager().openSession()

        try:
            nodes = self.__expand_nodespec(session, nodespec)

            if not nodes:
                raise NodeNotFound('No nodes matching nodespec [%s]' % (
                    nodespec))

            dbDstSoftwareProfile = SoftwareProfilesDbHandler().\
                getSoftwareProfile(session, dstSoftwareProfileName)

            results = NodesDbHandler().transferNode(
                session, nodes, dbDstSoftwareProfile, bForce=bForce)

            return self.__transferNodeCommon(session, dbDstSoftwareProfile,
                                             results)
        finally:
            DbManager().closeSession()

    def transferNodes(self, srcSoftwareProfileName, dstSoftwareProfileName,
                      count, bForce=False):
        """
        Transfer 'count' nodes from 'srcSoftwareProfile' to
        'dstSoftwareProfile'

        Raises:
            SoftwareProfileNotFound
        """
        session = DbManager().openSession()

        try:
            # It is not necessary to specify a source software profile. If
            # not specified, pick any eligible nodes in the hardware profile
            # mapped to the destination software profile. Don't ask me who
            # uses this capability, but it's here if you need it...
            dbSrcSoftwareProfile = SoftwareProfilesDbHandler().\
                getSoftwareProfile(
                    session, srcSoftwareProfileName) \
                if srcSoftwareProfileName else None

            dbDstSoftwareProfile = SoftwareProfilesDbHandler().\
                getSoftwareProfile(session, dstSoftwareProfileName)

            # 'count' may arrive as a numeric string or float; normalize
            results = NodesDbHandler().transferNodes(session,
                                                     dbSrcSoftwareProfile,
                                                     dbDstSoftwareProfile,
                                                     int(float(count)),
                                                     bForce=bForce)

            return self.__transferNodeCommon(session, dbDstSoftwareProfile,
                                             results)
        finally:
            DbManager().closeSession()

    def idleNode(self, nodespec):
        """
        Idle all nodes matching 'nodespec'.

        Raises:
            NodeNotFound
        """
        session = DbManager().openSession()

        try:
            nodes = self.__expand_nodespec(session, nodespec)

            if not nodes:
                raise NodeNotFound('No nodes matching nodespec [%s]' % (
                    nodespec))

            result = NodesDbHandler().idleNode(session, nodes)

            # Convert list of Nodes to list of node names for providing
            # user feedback.
            result_dict = {}
            for key, dbNodes in result.items():
                result_dict[key] = [dbNode.name for dbNode in dbNodes]

            session.commit()

            # Remove Puppet certificate(s) for idled node(s)
            for node_name in result_dict['success']:
                # Remove Puppet certificate for idled node
                bhm = osUtility.getOsObjectFactory().getOsBootHostManager()
                bhm.deletePuppetNodeCert(node_name)

            # Schedule a cluster update
            self.__scheduleUpdate()

            return result_dict
        except TortugaException as ex:
            session.rollback()

            raise
        except Exception as ex:
            session.rollback()

            self.getLogger().exception('[%s] %s' % (
                self.__class__.__name__, ex))

            raise
        finally:
            DbManager().closeSession()

    def __process_activateNode_results(self, tmp_results, dstswprofilename):
        results = {}

        for key, values in tmp_results.items():
            # With the exception of the "ProfileMappingNotAllowed" dict
            # item, all items in the dict are lists of nodes.
            if key != 'ProfileMappingNotAllowed':
                results[key] = [dbNode.name for dbNode in values]
            else:
                results[key] = \
                    [(value[0].name, value[1], value[2])
                     for value in values]

        if tmp_results['success']:
            # Iterate over activated nodes, creating dict keyed on
            # 'addHostSession'
            addHostSessions = {}

            for node in tmp_results['success']:
                if node.addHostSession not in addHostSessions:
                    addHostSessions[node.addHostSession] = []

                # NOTE(review): the empty list created above is immediately
                # overwritten with a single hardware profile name -- this
                # looks like it was meant to be .append(); confirm intent
                addHostSessions[node.addHostSession] = \
                    node.hardwareprofile.name

            # For each 'addHostSession', call postAddHost()
            for addHostSession, hwprofile in addHostSessions.items():
                AddHostManager().postAddHost(hwprofile, dstswprofilename,
                                             addHostSession)

        return results

    def activateNode(self, nodespec, softwareProfileName):
        """
        Activate all nodes matching 'nodespec' into the named software
        profile.

        Raises:
            SoftwareProfileNotFound
            NodeNotFound
            TortugaException
        """
        session = DbManager().openSession()

        try:
            dbSoftwareProfile = SoftwareProfilesDbHandler().\
                getSoftwareProfile(session, softwareProfileName) \
                if softwareProfileName else None

            dbNodes = self.__expand_nodespec(session, nodespec)

            if not dbNodes:
                raise NodeNotFound('No nodes matching nodespec [%s]' % (
                    nodespec))

            tmp_results = NodesDbHandler().activateNode(
                session, dbNodes, dbSoftwareProfile)

            results = self.__process_activateNode_results(
                tmp_results, softwareProfileName)

            session.commit()

            # Schedule a cluster update
            self.__scheduleUpdate()

            return results
        except TortugaException as ex:
            session.rollback()

            raise
        except Exception as ex:
            session.rollback()

            self.getLogger().exception('%s' % ex)

            raise
        finally:
            DbManager().closeSession()

    def startupNode(self, nodespec, remainingNodeList=None, bootMethod='n'):
        """
        Raises:
            NodeNotFound
        """
        return self._nodeDbApi.startupNode(
            nodespec, remainingNodeList=remainingNodeList or [],
            bootMethod=bootMethod)

    def shutdownNode(self, nodespec, bSoftShutdown=False):
        """
        Raises:
            NodeNotFound
        """
        return self._nodeDbApi.shutdownNode(nodespec, bSoftShutdown)

    def build_node_filterspec(self, nodespec):
        # Translate a comma-separated nodespec with shell-style wildcards
        # into a list of SQL LIKE patterns
        filter_spec = []

        for nodespec_token in nodespec.split(','):
            # Convert shell-style wildcards into SQL wildcards
            if '*' in nodespec_token or '?' in nodespec_token:
                filter_spec.append(
                    nodespec_token.replace('*', '%').replace('?', '_'))

                continue

            if '.' not in nodespec_token:
                # Unqualified host name: match both the short name and
                # any fully-qualified form
                filter_spec.append(nodespec_token)
                filter_spec.append(nodespec_token + '.%')

                continue

            # Add nodespec "AS IS"
            filter_spec.append(nodespec_token)

        return filter_spec

    def __expand_nodespec(self, session, nodespec): \
            # pylint: disable=no-self-use
        # Expand wildcards in nodespec. Each token in the nodespec can
        # be wildcard that expands into one or more nodes.
        return NodesDbHandler().getNodesByNameFilter(
            session, self.build_node_filterspec(nodespec))

    def rebootNode(self, nodespec, bSoftReset=False, bReinstall=False):
        """
        Raises:
            NodeNotFound
        """
        session = DbManager().openSession()

        try:
            nodes = self.__expand_nodespec(session, nodespec)

            if not nodes:
                raise NodeNotFound('No nodes matching nodespec [%s]' % (
                    nodespec))

            bhm = osUtility.getOsObjectFactory().getOsBootHostManager()

            if bReinstall:
                for dbNode in nodes:
                    # Force a network (re)install on next boot
                    bhm.setNodeForNetworkBoot(dbNode)

            results = NodesDbHandler().rebootNode(session, nodes,
                                                  bSoftReset)

            session.commit()

            return results
        finally:
            DbManager().closeSession()

    def checkpointNode(self, nodeName):
        return self._nodeDbApi.checkpointNode(nodeName)

    def revertNodeToCheckpoint(self, nodeName):
        return self._nodeDbApi.revertNodeToCheckpoint(nodeName)

    def migrateNode(self, nodeName, remainingNodeList, liveMigrate):
        return self._nodeDbApi.migrateNode(nodeName, remainingNodeList,
                                           liveMigrate)

    def evacuateChildren(self, nodeName):
        self._nodeDbApi.evacuateChildren(nodeName)

    def getChildrenList(self, nodeName):
        return self._nodeDbApi.getChildrenList(nodeName)

    def setParentNode(self, nodeName, parentNodeName):
        self._nodeDbApi.setParentNode(nodeName, parentNodeName)

    def addStorageVolume(self, nodeName, volume, isDirect="DEFAULT"):
        """
        Raises:
            VolumeDoesNotExist
            UnsupportedOperation
        """
        node = self.getNode(nodeName, {'hardwareprofile': True})

        # Only allow persistent volumes to be attached...
        vol = self._san.getVolume(volume)
        if vol is None:
            raise VolumeDoesNotExist('Volume [%s] does not exist' % (
                volume))

        if not vol.getPersistent():
            raise UnsupportedOperation(
                'Only persistent volumes can be attached')

        api = resourceAdapterFactory.getApi(
            node.getHardwareProfile().getResourceAdapter().getName())

        # Let the resource adapter apply its own default when the caller
        # did not specify a direct-attach preference
        if isDirect == "DEFAULT":
            return api.addVolumeToNode(node, volume)

        return api.addVolumeToNode(node, volume, isDirect)

    def removeStorageVolume(self, nodeName, volume):
        """
        Raises:
            VolumeDoesNotExist
            UnsupportedOperation
        """
        node = self.getNode(nodeName, {'hardwareprofile': True})

        api = resourceAdapterFactory.getApi(
            node.getHardwareProfile().getResourceAdapter().getName())

        vol = self._san.getVolume(volume)

        if vol is None:
            raise VolumeDoesNotExist('The volume [%s] does not exist' % (
                volume))

        if not vol.getPersistent():
            raise UnsupportedOperation(
                'Only persistent volumes can be detached')

        return api.removeVolumeFromNode(node, volume)

    def getStorageVolumes(self, nodeName):
        return self._san.getNodeVolumes(self.getNode(nodeName).getName())

    def getNodesByNodeState(self, state):
        return self._nodeDbApi.getNodesByNodeState(state)

    def getNodesByNameFilter(self, _filter):
        return self._nodeDbApi.getNodesByNameFilter(_filter)
class ResourceAdapter(object): \
        # pylint: disable=too-many-public-methods

    '''
    This is the base class for all resource adapters to derive from.
    The default actions simply print a debug message to show that the
    subclass did not implement the action.
    '''

    def __init__(self, addHostSession=None):
        # Subclasses must declare __adaptername__; it is used for logger
        # naming and per-adapter file paths below
        if '__adaptername__' not in self.__class__.__dict__:
            raise NotImplementedError(
                'Subclasses of ResourceAdapter must have __adaptername__'
                ' defined')

        self._logger = logging.getLogger(
            'tortuga.resourceAdapter.%s' % (self.__adaptername__))
        self._logger.addHandler(logging.NullHandler())

        # Lazily-resolved installer facts (see properties further below)
        self.__installer_public_hostname = None
        self.__installer_public_ipaddress = None
        self.__private_dns_zone = None

        # Initialize caches
        self.__addHostApi = None
        self.__nodeApi = None
        self.__osObject = None
        self.__sanApi = None

        # Initialize abort flag (to "not" aborting)
        self.__isAborted = False

        self._cm = ConfigManager()

        self._addHostSession = addHostSession

    @property
    def addHostSession(self):
        return self._addHostSession

    @property
    def cacheCfgFilePath(self):
        # Per-adapter instance cache file (see the instanceCache*
        # methods)
        return os.path.join(
            self._cm.getRoot(), 'var',
            '%s-instance.conf' % (self.__adaptername__))

    @property
    def cfgFileName(self):
        # Per-adapter defaults configuration file
        return os.path.join(
            self._cm.getKitConfigBase(),
            'adapter-defaults-%s.conf' % (self.__adaptername__))

    def hookAction(self, action, nodes, args=None):
        # Only the 'default' resource adapter overrides the hookAction()
        # method.
pass def start(self, addNodesRequest, dbSession, dbHardwareProfile, dbSoftwareProfile=None): \ # pylint: disable=unused-argument self.__trace(addNodesRequest, dbSession, dbHardwareProfile, dbSoftwareProfile) def validate_start_arguments(self, addNodesRequest, dbHardwareProfile, dbSoftwareProfile): self.__trace(addNodesRequest, dbHardwareProfile, dbSoftwareProfile) def stop(self, hardwareProfileName, deviceName): self.__trace(hardwareProfileName, deviceName) def updateNode(self, session, node, updateNodeRequest): \ # pylint: disable=unused-argument self.__trace(session, node, updateNodeRequest) def suspendActiveNode(self, nodeId): '''Change the given active node to an idle node''' self.__trace(nodeId) def idleActiveNode(self, nodeIds): '''Change the given active node to an idle node''' self.__trace(nodeIds) def activateIdleNode(self, node, softwareProfileName, softwareProfileChanged): '''Change the given idle node to an active node''' self.__trace(node, softwareProfileName, softwareProfileChanged) def deleteNode(self, nodeIds): '''Remove the given node (active or idle) from the system''' self.__trace(nodeIds) def _async_delete_nodes(self, nodes): """ Asynchronously delete nodes; calls "ResourceAdapter._delete_node()" method for each deleted nodes :param dbNodes: list of Nodes objects :return: None """ greenlets = [] for node in nodes: greenlets.append(gevent.spawn(self._delete_node, node)) # TODO: implement timeout gevent.joinall(greenlets) def _delete_node(self, node): """ Abstract method called to delete node from "ResourceAdapter._async_delete_nodes()" :param node: Nodes object """ def transferNode(self, nodeIdSoftwareProfileTuples, newSoftwareProfileName): '''Transfer the given idle node''' self.__trace(nodeIdSoftwareProfileTuples, newSoftwareProfileName) def startupNode(self, nodeIds, remainingNodeList=None, tmpBootMethod='n'): \ # pylint: disable=unused-argument '''Start the given node''' # By default raise unsupported operation raise 
UnsupportedOperation('Node does not support starting') def shutdownNode(self, nodes, bSoftReset=False): \ # pylint: disable=unused-argument '''Shutdown the given node''' # By default raise unsupported operation raise UnsupportedOperation('Node does not support shutdown') def rebootNode(self, nodes, bSoftReset=False): \ # pylint: disable=unused-argument '''Reboot the given node''' # By default raise unsupported operation raise UnsupportedOperation('Node does not support rebooting') def checkpointNode(self, nodeId): \ # pylint: disable=unused-argument '''Checkpoint the given node''' # By default raise unsupported operation raise UnsupportedOperation('Node does not support checkpointing') def revertNodeToCheckpoint(self, nodeId): \ # pylint: disable=unused-argument '''Revert the given node to the checkpoint''' # By default raise unsupported operation raise UnsupportedOperation('Node does not support checkpointing') def migrateNode(self, nodeId, remainingNodeList, liveMigrate): \ # pylint: disable=unused-argument '''Migrate the given node''' # By default raise unsupported operation raise UnsupportedOperation('Node does not support migrating') def addVolumeToNode(self, node, volume, isDirect): \ # pylint: disable=unused-argument '''Add a disk to a node''' # By default raise unsupported operation raise UnsupportedOperation( 'Node does not support dynamic disk addition') def removeVolumeFromNode(self, node, volume): \ # pylint: disable=unused-argument '''Remove a disk from a node''' # By default raise unsupported operation raise UnsupportedOperation( 'Node does not support dynamic disk deletion' % (node)) def abort(self): '''abort node addition''' self._logger.debug('Setting abort flag') self.__isAborted = True def isAborted(self): '''Returns status of abort flag''' return self.__isAborted def __trace(self, *pargs, **kargs): stack = traceback.extract_stack() funcname = stack[-2][2] self._logger.debug('-- (pass) %s::%s %s %s' % (self.__adaptername__, funcname, pargs, 
kargs))  # NOTE(review): orphaned tail of a statement that begins before this chunk

    def getLogger(self):
        """Return the logger assigned to this resource adapter."""
        return self._logger

    def getResourceAdapterConfig(self, sectionName=None):
        """
        Return the adapter configuration for ``sectionName`` as a dict,
        with values from the 'default' section overridden by any
        section-specific settings.

        Raises:
            ResourceNotFound
        """
        self.getLogger().debug(
            'getResourceAdapterConfig(sectionName=[{0}])'.format(
                sectionName if sectionName else '(none)'))

        try:
            # Load default values
            defaultResourceAdapterConfigDict = self._loadConfigDict()

            # Caller asked for the defaults themselves; nothing to merge
            if sectionName is None or sectionName == 'default':
                return defaultResourceAdapterConfigDict
        except ResourceNotFound:
            # No 'default' section exists; start from an empty dict
            defaultResourceAdapterConfigDict = {}

        overrideConfigDict = self._loadConfigDict(sectionName)

        # Override defaults with hardware profile specific settings
        return dict(
            list(defaultResourceAdapterConfigDict.items()) +
            list(overrideConfigDict.items()))

    def _loadConfigDict(self, sectionName=None):
        """
        Load one configuration section from the resource adapter
        credentials store into a plain key/value dict.

        Raises:
            ResourceNotFound
        """
        if sectionName is None:
            sectionName = 'default'

        session = DbManager().openSession()

        try:
            self.getLogger().debug('_loadConfigDict()')

            result = ResourceAdapterCredentialsDbHandler().get(
                session, self.__adaptername__, sectionName)

            configDict = {}

            for entry in result['configuration']:
                configDict[entry['key']] = entry['value']
        finally:
            # NOTE(review): session opened on one DbManager() instance but
            # closed via a fresh DbManager() -- presumably DbManager is a
            # singleton; confirm.
            DbManager().closeSession()

        return configDict

    def getResourceAdapterConfigProfileByNodeName(self, name):
        """Get resource adapter configuration for existing node"""
        self.getLogger().debug(
            'getResourceAdapterConfigByNodeName(): name=[{0}]'.format(name))

        instance_cache = self.instanceCacheRefresh()

        # Returns None when the node has no cache entry or no explicit
        # 'resource_adapter_configuration' option
        return instance_cache.get(name, 'resource_adapter_configuration') \
            if instance_cache.has_section(name) and instance_cache.has_option(
                name, 'resource_adapter_configuration') else None

    def __getAddHostApi(self):
        '''Get and cache the Add Host API'''
        if self.__addHostApi is None:
            # Deferred import to avoid a circular dependency at module load
            from tortuga.addhost.addHostServerLocal \
                import AddHostServerLocal

            self.__addHostApi = AddHostServerLocal()

        return self.__addHostApi

    def __getNodeApi(self):
        '''Get and cache the Node API'''
        if self.__nodeApi is None:
            from tortuga.node.nodeApi import NodeApi
            self.__nodeApi = NodeApi()
        return self.__nodeApi

    def __getOsObject(self):
        '''Get and cache the OS Object Factory'''
        if self.__osObject is None:
            from tortuga.os_utility import osUtility
            self.__osObject = osUtility.getOsObjectFactory()
        return self.__osObject

    def __getSanApi(self):
        '''Internal: Get and cache the SAN API'''
        if self.__sanApi is None:
            from tortuga.san import san
            self.__sanApi = san.San()
        return self.__sanApi

    # Properties for this object (lazily-constructed API singletons)
    addHostApi = property(__getAddHostApi, None, None, None)
    nodeApi = property(__getNodeApi, None, None, None)
    osObject = property(__getOsObject, None, None, None)
    sanApi = property(__getSanApi, None, None, None)

    def statusMessage(self, msg):
        """Report progress through the add-host session, or stdout."""
        if self._addHostSession:
            AddHostManager().updateStatus(self._addHostSession, msg)
        else:
            # Just print out the message...this is a stop gap for resource
            # adapters running outside of the addHostManager framework
            sys.stdout.write(msg + '\n')
            sys.stdout.flush()

    def getOptions(self, dbSoftwareProfile, dbHardwareProfile): \
            # pylint: disable=unused-argument
        # Default implementation: no adapter-specific options
        return {}

    def instanceCacheWrite(self, cfg):
        # Write the instance cache back to disk
        self.getLogger().debug('instanceCacheWrite()')

        with open(self.cacheCfgFilePath, 'w') as fp:
            cfg.write(fp)

    def instanceCacheRefresh(self):
        """Re-read the instance cache file; returns a ConfigParser."""
        self.getLogger().debug('instanceCacheRefresh()')

        cfg = configparser.ConfigParser()

        cfg.read(self.cacheCfgFilePath)

        return cfg

    def instanceCacheSet(self, name, metadata=None):
        """Create/update the cache section for node ``name``."""
        self.getLogger().debug(
            'instanceCacheSet(node=[%s], metadata=[%s])' % (name, metadata))

        cfg = self.instanceCacheRefresh()

        if not cfg.has_section(name):
            cfg.add_section(name)

        # Write metadata to node section
        if metadata:
            for key, value in metadata.items():
                cfg.set(name, key, value)

        self.instanceCacheWrite(cfg)

    def instanceCacheSetBulk(self, instance_ids, nodes=None):
        """
        Record instance ids in the cache. With no ``nodes``, the ids are
        accumulated (space-separated) in the 'unassigned' section.
        """
        self.getLogger().debug(
            'instanceCacheSetBulk(instance_ids=[%s], nodes=[%s])' % (
                ' '.join(instance_ids),
                ' '.join([node.name for node in nodes or []])))

        cfg = self.instanceCacheRefresh()

        if not nodes:
            if not cfg.has_section('unassigned'):
                cfg.add_section('unassigned')

                instances = set()
            else:
                val = cfg.get('unassigned', 'instances')

                instances = set(val.split(' '))

            instances |= set(instance_ids)

            cfg.set('unassigned', 'instances', ' '.join(instances))

        # NOTE(review): when 'nodes' IS provided, no per-node entries are
        # written here -- the cache file is simply rewritten unchanged.
        # Looks like a truncated/elided branch; verify against upstream.
        self.instanceCacheWrite(cfg)

    def instanceCacheGet(self, nodeName):
        """
        Return the cache section for ``nodeName`` as a dict.

        Raises:
            ResourceNotFound
        """
        self.getLogger().debug('instanceCacheGet(nodeName=[%s])' % (nodeName))

        cfg = self.instanceCacheRefresh()

        if not cfg.has_section(nodeName):
            raise ResourceNotFound(
                'No instance cache entry for [{0}]'.format(nodeName))

        # Read entire section into a dict
        result = {}

        for key, value in cfg.items(nodeName):
            result[key] = value

        return result

    def instanceCacheDelete(self, name):
        # Clear instance from configuration
        config = self.instanceCacheRefresh()

        if not config.has_section(name):
            self.getLogger().debug(
                'Cache clear: node [{0}] not found, no action'
                ' taken'.format(name))
            return

        self.getLogger().debug('Cache clear: node [{0}]'.format(name))

        config.remove_section(name)

        self.instanceCacheWrite(config)

    def instanceCacheUpdate(self, name, added=None, deleted=None):
        """
        'added' is a list of key-value tuples to be added
        'deleted' is a list of keys to be removed from the instance cache
        """
        self.getLogger().debug(
            'instanceCacheUpdate(): name=[{0}]'.format(name))

        config = self.instanceCacheRefresh()

        if not config.has_section(name):
            config.add_section(name)

        for key, value in added or []:
            config.set(name, key, value)

        for key in deleted or []:
            config.remove_option(name, key)

        self.instanceCacheWrite(config)

    def __findNicForProvisioningNetwork(self, nics, prov_network):
        """
        Return the first NIC in ``nics`` attached to ``prov_network``.

        TODO: move this elsewhere

        Raises:
            NicNotFound
        """
        nics = [nic for nic in nics if nic.network == prov_network]

        if not nics:
            raise NicNotFound(
                'Unable to find NIC on provisioning network [%s]' % (
                    prov_network.address + '/' + prov_network.netmask))

        return nics[0]

    def writeLocalBootConfiguration(self, node, hardwareprofile,
                                    softwareprofile):
        """
        Write PXE file and DHCP lease for the newly added node.

        Raises:
            NicNotFound
        """
        if not hardwareprofile.nics:
            # Hardware profile has no provisioning NICs defined. This
            # shouldn't happen...
            self.getLogger().debug(
                'No provisioning nics defined in hardware profile %s' % (
                    hardwareprofile.name))

            return

        # Determine the provisioning nic for the hardware profile
        hwProfileProvisioningNic = hardwareprofile.nics[0]

        nic = None

        if hwProfileProvisioningNic.network:
            # Find the nic attached to the newly added node that is on
            # the same network as the provisioning nic.
            nic = self.__findNicForProvisioningNetwork(
                node.nics, hwProfileProvisioningNic.network)

        if not nic or not nic.mac:
            # NOTE(review): if nic is None here, nic.ip below raises
            # AttributeError before the warning is emitted -- confirm and fix
            self.getLogger().warning(
                'MAC address not defined for nic (ip=[%s]) on node [%s]' % (
                    nic.ip, node.name))

            return

        # Set up DHCP/PXE for newly addded node
        bhm = getOsObjectFactory().getOsBootHostManager()

        # Write out the PXE file
        bhm.writePXEFile(
            node, hardwareprofile=hardwareprofile,
            softwareprofile=softwareprofile, localboot=False)

        # Add a DHCP lease
        bhm.addDhcpLease(node, nic)

    def removeLocalBootConfiguration(self, node):
        """Remove the node's PXE file and DHCP lease."""
        bhm = self.osObject.getOsBootHostManager()

        bhm.rmPXEFile(node)
        bhm.removeDhcpLease(node)

    def _pre_add_host(self, name, hwprofilename, swprofilename, ip): \
            # pylint: disable=unused-argument
        # Perform "pre-add-host" operation
        command = ('sudo %s/pre-add-host'
                   ' --hardware-profile %s'
                   ' --software-profile %s'
                   ' --host-name %s' % (
                       self._cm.getBinDir(),
                       hwprofilename,
                       swprofilename,
                       name))

        if ip:
            command += ' --ip %s' % (ip)

        self.getLogger().debug('calling command= [%s]' % (command))

        # Fire-and-forget: exit status of the hook is intentionally ignored
        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, close_fds=True)

        p.communicate()

        p.wait()

    @property
    def installer_public_hostname(self):
        # Lazily resolve (and cache) the installer FQDN, preferring the
        # value reported by Puppet's facter over the configured host name
        if self.__installer_public_hostname is None:
            cmd = '/opt/puppetlabs/bin/facter fqdn'

            with open(os.devnull, 'w') as devnull:
                p = subprocess.Popen(cmd, shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=devnull)

                stdout, _ = p.communicate()

                retval = p.wait()

            if retval == 0:
                self.__installer_public_hostname = stdout.decode().rstrip()

                self.getLogger().debug(
                    'using installerName [%s] from Facter' % (
                        self.__installer_public_hostname))
            else:
                self.__installer_public_hostname = self._cm.getHost()

                self.getLogger().debug(
                    'using installerName [%s] from system' % (
                        self.__installer_public_hostname))

        return self.__installer_public_hostname

    @property
    def installer_public_ipaddress(self):
        # Get installer IP (first IPv4 address from DNS; cached)
        if self.__installer_public_ipaddress is None:
            self.getLogger().debug('Looking up installer IP using DNS')

            aiInfo = socket.getaddrinfo(
                self.installer_public_hostname, None, socket.AF_INET,
                socket.SOCK_STREAM)

            self.__installer_public_ipaddress = aiInfo[0][4][0]

        return self.__installer_public_ipaddress

    @property
    def private_dns_zone(self):
        # Lazily fetch (and cache) the 'DNSZone' global parameter
        if self.__private_dns_zone is None:
            self.__private_dns_zone = \
                ParameterApi().getParameter('DNSZone').getValue()

        return self.__private_dns_zone

    def get_node_vcpus(self, name): \
            # pylint: disable=unused-argument
        # Default implementation: one vcpu per node
        return 1

    def get_instance_size_mapping(self, value):
        """
        Helper method for matching the first field (instance size) in
        the resource adapter specific CSV file

        :return: instance type/size to vcpus mapping
        :returntype int:
        """
        fn = os.path.join(
            self._cm.getKitConfigBase(),
            '{0}-instance-sizes.csv'.format(self.__adaptername__))

        if not os.path.exists(fn):
            return 1

        try:
            with open(fn) as fp:
                reader = csv.reader(fp)

                for row in reader:
                    if row[0] == value:
                        return int(row[1])

            # No matching instance size; fall back to default
            return 1
        except Exception as exc:
            # Best-effort: any parse error falls back to the default of 1
            self.getLogger().error(
                'Error processing instance type mapping'
                ' [{0}] (exc=[{1}]). Using default value'.format(fn, exc))

        return 1
class TortugaDeployer: \
        # pylint: disable=too-many-public-methods
    """
    Drives the interactive Tortuga installer: loads settings, prompts the
    operator, bootstraps Puppet, initializes the database and installs kits.
    """

    def __init__(self, logger, cmdline_options=None):
        self._cm = ConfigManager()

        self._logger = logger

        self._osObjectFactory = osUtility.getOsObjectFactory()

        self._settings = self.__load_settings(cmdline_options)

        self._settings['installer_software_profile'] = 'Installer'
        self._settings['installer_hardware_profile'] = 'Installer'

        self._settings['eulaAccepted'] = False

        self._settings['fqdn'] = getfqdn()

        self._settings['osInfo'] = getOsInfo()

        self._forceCleaning = False
        self._depotCreated = False

        fsManager = self._osObjectFactory.getOsFileSystemManager()

        self._lockFilePath = os.path.join(
            fsManager.getOsLockFilePath(), 'tortuga-setup')

        # Set up i18n; fall back to the system locale dir if the bundled
        # one does not exist
        langdomain = 'tortuga-config'

        localedir = os.path.join(self._cm.getRoot(), 'share', 'locale')

        if not os.path.exists(localedir):
            # Try the system path
            localedir = '/usr/share/locale'

        gettext.bindtextdomain(langdomain, localedir)
        gettext.textdomain(langdomain)
        self.gettext = gettext.gettext
        self._ = self.gettext

        self._logger.info('Detected OS: [%s]', self._settings['osInfo'])

    def __load_settings(self, cmdline_options):
        """Merge command-line options with tortuga.ini; returns settings dict."""
        settings = dict(list(cmdline_options.items()))

        default_cfgfile = os.path.join(
            self._cm.getKitConfigBase(), 'tortuga.ini')

        if 'inifile' in cmdline_options and \
                cmdline_options['inifile'] != default_cfgfile:
            # Copy configuration specified on command-line to
            # $TORTUGA_ROOT/config/tortuga.ini
            self._logger.info(
                'Using configuration file [%s]' % (settings['inifile']))
            self._logger.info(
                'Copying configuration to [%s]' % (default_cfgfile))

            if os.path.exists(default_cfgfile):
                # Back up existing 'tortuga.ini'
                shutil.move(default_cfgfile, default_cfgfile + '.orig')

            shutil.copyfile(cmdline_options['inifile'], default_cfgfile)

        settings['inifile'] = default_cfgfile

        cfg = configparser.ConfigParser()
        cfg.read(settings['inifile'])

        settings['timezone'] = ''
        settings['utc'] = False
        settings['keyboard'] = 'us'
        settings['language'] = 'en_US.UTF-8'

        # Get database setting
        value = cfg.get('database', 'engine') \
            if cfg.has_section('database') and \
            cfg.has_option('database', 'engine') else None

        if value and value not in ('mysql', 'sqlite'):
            raise InvalidArgument(
                'Unsupported database engine [%s]' % (value))

        settings['database'] = {
            'engine': value if value else 'mysql'
        }

        # Get depot directory
        if cfg.has_section('installer') and \
                cfg.has_option('installer', 'depotpath'):
            settings['depotpath'] = cfg.get('installer', 'depotpath')

            # For consistency's sake...
            self._cm.setDepotDir(settings['depotpath'])
        else:
            settings['depotpath'] = self._cm.getDepotDir()

        # Internal web port
        settings['intWebPort'] = cfg.getint('installer', 'intWebPort') \
            if cfg.has_section('installer') and \
            cfg.has_option('installer', 'intWebPort') else \
            self._cm.getIntWebPort()

        self._cm.setIntWebPort(settings['intWebPort'])

        # Admin port
        settings['adminPort'] = cfg.getint('installer', 'adminPort') \
            if cfg.has_section('installer') and \
            cfg.has_option('installer', 'adminPort') else \
            self._cm.getAdminPort()

        self._cm.setAdminPort(settings['adminPort'])

        # IntWebServicePort
        settings['intWebServicePort'] = cfg.getint(
            'installer', 'intWebServicePort') \
            if cfg.has_section('installer') and \
            cfg.has_option('installer', 'intWebServicePort') else \
            self._cm.getIntWebServicePort()

        self._cm.setIntWebServicePort(settings['intWebServicePort'])

        return settings

    def _get_setting(self, name, section=None):
        """Look up a setting, optionally within a nested section dict."""
        if section and section in self._settings:
            return self._settings[section][name] \
                if name in self._settings[section] else None

        return self._settings[name] if name in self._settings else None

    def eout(self, message, *args):
        """
        Output messages to STDERR with Internationalization.
        Additional arguments will be used to substitute variables in the
        message output
        """
        if args:
            mesg = self.gettext(message) % args
        else:
            mesg = self.gettext(message)

        sys.stderr.write(mesg)

    def out(self, message, *args):
        """
        Output messages to STDOUT with Internationalization.
        Additional arguments will be used to substitute variables in the
        message output
        """
        if args:
            mesg = self.gettext(message) % args
        else:
            mesg = self.gettext(message)

        sys.stdout.write(mesg)

    def prompt(self, default_value, auto_answer_default_value,
               text_list, question,
               tag=None, section=None, isPassword=False):
        """Generic user prompting routine"""
        resp_value = None

        bDefaults = self._settings['defaults']

        if tag:
            resp_value = self._get_setting(tag, section=section)
            if not resp_value and bDefaults:
                # Use the default value
                default_value = auto_answer_default_value
        elif bDefaults:
            default_value = auto_answer_default_value

        if text_list:
            self.out('\n')

            for line in text_list:
                self.out(line + '\n')

        if default_value and not isPassword:
            self.out('\n%s [%s]: ' % (question, default_value))
        else:
            self.out('\n%s: ' % (question))

        if bDefaults or resp_value:
            # Non-interactive: echo the chosen value (unless a password)
            if resp_value:
                value = resp_value
            else:
                value = auto_answer_default_value

            if not isPassword:
                self.out('%s\n' % value)
        else:
            if isPassword:
                import getpass
                value = getpass.getpass('').strip()
            else:
                value = input('').strip()

            if not value:
                value = default_value

        return value

    def checkPreInstallConfiguration(self):  # pylint: disable=no-self-use
        """
        Raises:
            InvalidMachineConfiguration
        """
        # Check for existence of /etc/hosts
        if not os.path.exists('/etc/hosts'):
            raise InvalidMachineConfiguration(
                '/etc/hosts file is missing. Unable to proceed with'
                ' installation')

    def preInstallPrep(self):
        """Display/record EULA acceptance; restore resolv.conf backup."""
        bAcceptEula = self._settings['acceptEula']

        license_file = ' %s/LICENSE' % (self._cm.getEtcDir())

        print()

        if bAcceptEula:
            cmd = 'cat %s\n' % (license_file)
            os.system(cmd)
        else:
            cmd = 'more %s\n' % (license_file)

            print("To install Tortuga you must read and agree to "
                  "the following EULA.")

            print("Press 'Enter' to continue...")

            input('')
            os.system(cmd)
            print()

            while True:
                print('Do you agree? [Yes / No]', end=' ')
                answer = input('').lower()

                if answer not in ['yes', 'no', 'y', 'n']:
                    print('Invalid response. Please respond \'Yes\''
                          ' or \'No\'')

                    continue
                break

            if answer[0] == 'n':
                raise EulaAcceptanceRequired(
                    'You must accept the EULA to install Tortuga')

        self._settings['eulaAccepted'] = \
            'Accepted on: %s local machine time' % (time.ctime())

        # Restore resolv.conf if we have a backup
        if osUtility.haveBackupFile('/etc/resolv.conf'):
            osUtility.restoreFile('/etc/resolv.conf')

    def _runCommandWithSpinner(self, cmd, statusMsg, logFileName):
        """
        Run ``cmd`` in a shell, teeing its output to ``logFileName`` while
        displaying a spinner; returns the command's exit status.
        """
        self._logger.debug(
            '_runCommandWithSpinner(cmd=[%s], logFileName=[%s])' % (
                cmd, logFileName))

        self.out(statusMsg + ' ')

        # Open the log file in unbuffered mode
        fpOut = open(logFileName, 'ab', 0)

        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, bufsize=1,
                             close_fds=True)

        for i in itertools.cycle(['/', '-', '\\', '|']):
            buf = p.stdout.readline()

            # NOTE(review): the empty-string/space writes below look like
            # backspace ('\b') escapes lost in transit -- confirm upstream
            sys.stdout.write('')
            sys.stdout.flush()

            if not buf:
                break

            fpOut.write(buf)

            sys.stdout.write(i)
            sys.stdout.flush()
            sys.stdout.write(' ')

        self.out('done.\n')

        retval = p.wait()

        fpOut.close()

        return retval

    def puppetApply(self):
        '''
        Complete the installer configuration by running against the
        previously installed Puppet master.  Display a spinner while Puppet
        runs.
        '''
        self._logger.info('Running Puppet for post-configuration')

        logFileName = '/tmp/tortuga_setup.log'

        cmd = ('/opt/puppetlabs/bin/puppet agent --color false --onetime'
               ' --no-daemonize --detailed-exitcodes --verbose 2>&1')

        retval = self._runCommandWithSpinner(
            cmd, statusMsg=(
                '\nCompleting installer configuration.'
                ' Please wait...'), logFileName=logFileName)

        if retval not in (0, 2):
            # Puppet can return a non-zero return code, even if it was
            # successful.
            errmsg = 'Puppet post-configuration failed (see log file %s)' % (
                logFileName)

            self._logger.error(errmsg)

            self.out(errmsg + '\n')

            raise Exception(errmsg)

        self._logger.info('Puppet post-configuration completed')

    def startSetup(self):
        """Handle --force cleanup and create the setup lock file."""
        # If force was specified clean first and then run...
        bForce = self._settings['force']

        if bForce:
            self._forceCleaning = True

            self.out(
                '--force option specified. Cleaning previous'
                ' installation.\n')

            self.cleanup()

            self._forceCleaning = False

        if os.path.exists(self._lockFilePath):
            raise SoftwareAlreadyDeployed(
                "\ntortuga-setup has already been run.\n\n"
                "Use --force option to force reinstallation.")

        open(self._lockFilePath, 'w').close()

        self.out('Tortuga Setup\n')

    def getClusterConfig(self):
        """Gather system settings, ports, and admin credentials."""
        sysManager = self._osObjectFactory.getOsSysManager()

        self._settings['timezone'], self._settings['utc'] = \
            sysManager.findTimeInfo()

        self._settings['keyboard'] = sysManager.findKeyboard()

        self._settings['language'] = sysManager.findLanguage()

        self.out(_('\nStarting Tortuga setup...\n'))

        # Ports configuration
        if not self._settings['defaults']:
            intWebPort, adminPort, intWebServicePort = self.configurePorts()

            self._cm.setIntWebPort(intWebPort)
            self._cm.setAdminPort(adminPort)
            self._cm.setIntWebServicePort(intWebServicePort)

            self._settings['intWebPort'] = intWebPort
            self._settings['adminPort'] = adminPort
            self._settings['intWebServicePort'] = intWebServicePort

        # Admin username and password
        self._settings['adminUsername'], \
            self._settings['adminPassword'] = self.promptForAdminCredentials()

    def prepDepot(self):
        """Prompt for/validate the depot path and record it in settings."""
        depotpath = None

        if not self._settings['defaults']:
            self.out(
                _('Tortuga requires a directory for storage of OS'
                  ' distribution media and other files required for'
                  ' node provisioning.\n\n'))

        while not depotpath:
            if self._settings['defaults']:
                response = self._settings['depotpath']
            else:
                try:
                    response = input(
                        'Please enter a depot path (Ctrl-C to interrupt)'
                        ' [%s]: ' % (self._settings['depotpath']))
                except KeyboardInterrupt:
                    raise InvalidArgument(_('Aborted by user.'))

                if not response:
                    response = self._settings['depotpath']

            if not response.startswith('/'):
                errmsg = 'Depot path must be fully-qualified'

                if not self._settings['defaults']:
                    self.out('Error: %s\n' % (errmsg))

                    continue

                raise InvalidArgument(errmsg)

            if response == '/':
                errmsg = 'Depot path cannot be system root directory'

                if not self._settings['defaults']:
                    self.out(_('Error: %s\n' % (errmsg)))

                    continue

                raise InvalidArgument(errmsg)

            if os.path.exists(response):
                if not self._settings['force']:
                    if not self._settings['defaults']:
                        self.out(
                            _('Directory [%s] already exists. Do you wish to'
                              ' remove it [N/y]? ') % (response))

                        remove_response = input('')

                        if not remove_response or \
                                remove_response[0].lower() == 'n':
                            continue_response = input(
                                'Do you wish to continue [N/y]? ')

                            if continue_response and \
                                    continue_response[0].lower() == 'y':
                                continue

                            raise InvalidArgument(_('Aborted by user.'))
                    else:
                        raise InvalidArgument(
                            _('Existing depot directory [%s] will not be'
                              ' removed.') % (response))
                else:
                    self.out(
                        _('\nRemoving existing depot directory [%s]... ') % (
                            response))

                    depotpath = response

                    tortugaSubprocess.executeCommand(
                        'rm -rf %s/*' % (depotpath))

                    self.out(_('done.\n'))
            else:
                depotpath = response

        self._settings['depotpath'] = depotpath

        self._cm.setDepotDir(self._settings['depotpath'])

    def _portPrompt(self, promptStr, defaultValue):
        """Prompt until a valid TCP port (1-65535) is entered."""
        while True:
            tmpPort = self.prompt(
                defaultValue, defaultValue, None, promptStr)

            try:
                tmpPort = int(tmpPort)

                if tmpPort <= 0 or tmpPort > 65535:
                    raise ValueError('Port must be between 1 and 65535')

                # Success
                break
            except ValueError as ex:
                self.out('Error: ' + str(ex) + '\n')

        return tmpPort

    def configurePorts(self):
        """Show current port assignments; optionally prompt for new ones."""
        reconfigurePorts = self.prompt(
            'N', 'N', [
                'The following ports will be used by Tortuga:'
                '',
                ' +-----------------------------+-------+',
                ' | Description | Port |',
                ' +-----------------------------+-------+',
                ' | Internal webserver | %5d |' % (
                    self._settings['intWebPort']),
                ' | SSL webservice daemon | %5d |' % (
                    self._settings['adminPort']),
                ' | Local webservice daemon | %5d |' % (
                    self._settings['intWebServicePort']),
                ' +-----------------------------+-------+'
            ], 'Do you wish to change the default configuration [N/y]?')

        if not reconfigurePorts or reconfigurePorts[0].lower() == 'n':
            return self._settings['intWebPort'], \
                self._settings['adminPort'], \
                self._settings['intWebServicePort']

        # Internal web server port
        intWebPort = self._portPrompt(
            'Enter port for internal webserver',
            self._settings['intWebPort'])

        # SSL webservice daemon port
        adminPort = self._portPrompt(
            'Enter port for SSL webservice daemon',
            self._settings['adminPort'])

        # Local webservice daemon port
        intWebServicePort = self._portPrompt(
            'Enter port for local webservice daemon',
            self._settings['intWebServicePort'])

        return intWebPort, adminPort, intWebServicePort

    def _removePackageSources(self):
        """Remove any package sources previously added by Tortuga."""
        pkgManager = self._osObjectFactory.getOsPackageManager()
        for pkgSrcName in pkgManager.getPackageSourceNames():
            self._logger.info(
                'Removing package source [%s]' % (pkgSrcName))
            pkgManager.removePackageSource(pkgSrcName)

    def _disableTortugaws(self):
        """Stop the Tortuga webservice daemon (ignore failures)."""
        self.out(' * Disabling Tortuga webservice\n')

        _tortugaWsManager = self._osObjectFactory.getTortugawsManager()
        serviceName = _tortugaWsManager.getServiceName()
        _osServiceManager = getOsObjectFactory().getOsServiceManager()

        try:
            _osServiceManager.stop(serviceName)
        except CommandFailed:
            # Best-effort: service may not be running/installed yet
            pass

    def cleanup(self):
        """Undo a previous (possibly partial) installation."""
        # If possible, remove any package sources we added
        self._removePackageSources()

        osUtility.removeFile(self._lockFilePath)

        osUtility.removeFile(self._cm.getProfileNiiFile())

        # Turn off the webservice daemon
        self._disableTortugaws()

        # Restore resolv.conf
        if osUtility.haveBackupFile('/etc/resolv.conf'):
            osUtility.restoreFile('/etc/resolv.conf')

        # Drop database
        dbManager = self._osObjectFactory.getOsApplicationManager(
            self._settings['database']['engine'])

        try:
            dbSchema = self._cm.getDbSchema()

            self.out(' * Removing database [%s]\n' % (dbSchema))

            dbManager.destroyDb(dbSchema)
        except Exception as ex:  # pylint: disable=broad-except
            self._logger.exception(
                'Could not destroy existing db: {}'.format(ex))

        # Remove DB password file
        osUtility.removeFile(self._cm.getDbPasswordFile())

        # Remove CFM secret
        cfmSecretFile = self._cm.getCfmSecretFile()

        if os.path.exists(cfmSecretFile):
            osUtility.removeFile(self._cm.getCfmSecretFile())

        # Generic cleanup
        osUtility.removeLink('/etc/tortuga-release')

        # Cleanup or remove depot directory
        errmsg = 'Removing contents of [%s]' % (self._settings['depotpath'])

        self._logger.debug(errmsg)

        if self._depotCreated:
            self.out(' * %s\n' % (errmsg))

            osUtility.removeDir(self._settings['depotpath'])
        else:
            if self._settings['depotpath']:
                self.out(' * %s\n' % (errmsg))

                tortugaSubprocess.executeCommand(
                    'rm -rf %s/*' % (self._settings['depotpath']))

                self.out('\n')

        if not self._forceCleaning:
            self.out('Consult log(s) for further details.\n')

            self._logger.error('Installation failed')

    def runSetup(self):
        """ Installer setup. """
        self.checkPreInstallConfiguration()

        # Do not run cleanup if this fails.
        self.startSetup()

        try:
            self.preInstallPrep()

            self.getClusterConfig()

            self.prepDepot()

            self.preConfig()

            self.pre_init_db()

            self.puppetBootstrap()

            dbm, session = self.initDatabase()

            try:
                self.createAdminUser(
                    session, self._settings['adminUsername'],
                    self._settings['adminPassword'])

                self.installKits(dbm)

                self.enableComponents(session)
            finally:
                dbm.closeSession()

            self.puppetApply()

            self.out('\nTortuga installation completed successfully!\n\n')

            print('Run \"exec -l $SHELL\" to initialize Tortuga environment\n')
        except Exception:  # pylint: disable=broad-except
            self._logger.exception('Fatal error occurred during setup')

            raise TortugaException('Installation failed')

    def _generate_db_password(self):
        """
        Generate a database password.
        """
        #
        # Because Apache httpd server is not installed at the time this
        # runs, we cannot set the ownership of this file to be 'apache'
        # (which is necessary for the Tortuga webservice).
        #
        # Set ownership of file to root:puppet.
        #
        # When the Puppet bootstrap runs, it changes the ownership to
        # 'apache:puppet' and everybody is happy!
        #
        puppet_user = pwd.getpwnam('puppet')
        gid = puppet_user.pw_gid

        self._generate_password_file(self._cm.getDbPasswordFile(), gid=gid)

    def _generate_redis_password(self):
        """
        Generate a password for Redis.
        """
        #
        # Puppet needs read access to this file so that it can use it for
        # writing the redis config file.
        #
        puppet_user = pwd.getpwnam('puppet')
        gid = puppet_user.pw_gid

        self._generate_password_file(self._cm.getRedisPasswordFile(),
                                     gid=gid)

    def _generate_password_file(self, file_name: str,
                                password_length: int = 32,
                                uid: int = 0, gid: int = 0,
                                mode: int = 0o440):
        """
        Generate a password in a file.

        :param file_name:       the name of the file in which the password
                                will be stored
        :param password_length: the length of the password, default = 32
        :param uid:             the uid (owner) of the file, default = 0
        :param gid:             the gid (group) of the file, default = 0
        :param mode:            the file perms, default 0440
        """
        password = self._generate_password(password_length)

        with open(file_name, 'w') as fp:
            fp.write(password)

        os.chown(file_name, uid, gid)
        os.chmod(file_name, mode)

    def _generate_password(self, length: int = 8) -> str:
        """
        Generate a random alphanumeric password.

        :param length: the length of the password
        :return: the generated password
        """
        # Security fix: these passwords protect the database/Redis, so use
        # the cryptographically strong 'secrets' RNG instead of the
        # predictable 'random' PRNG.
        import secrets

        chars = string.ascii_letters + string.digits

        return ''.join(secrets.choice(chars) for _ in range(length))

    def preConfig(self):
        """Write hiera common data and generate service passwords."""
        # Create default hieradata directory
        hieraDataDir = '/etc/puppetlabs/code/environments/production/data'

        if not os.path.exists(hieraDataDir):
            os.makedirs(hieraDataDir)

        # Derive host name of puppet master from FQDN
        fqdn = self._settings['fqdn']

        configDict = {
            'version': 5,
            'DNSZone': 'private',
            'puppet_server': fqdn,
            'depot': self._settings['depotpath'],
        }

        with open(os.path.join(hieraDataDir, 'tortuga-common.yaml'),
                  'wb') as fp:
            fp.write(
                yaml.safe_dump(
                    configDict,
                    explicit_start=True,
                    default_flow_style=False).encode())

        self._generate_db_password()
        self._generate_redis_password()

    def pre_init_db(self):
        # If using 'mysql' as the database backend, we need to install the
        # puppetlabs-mysql Puppet module prior to bootstrapping. This used
        # to be done in 'install-tortuga.sh'
        if self._settings['database']['engine'] == 'mysql':
            print('\nUsing MySQL as backing database.')

            puppet_module = 'puppetlabs-mysql'

            logmsg = f'Installing \'{puppet_module}\' module'

            self._logger.debug(logmsg)

            print(f'\n{logmsg}...', end='')

            cmd = ('/opt/puppetlabs/bin/puppet module install'
                   f' --color false {puppet_module}')
            tortugaSubprocess.executeCommand(cmd)

            print('done.')

    def puppetBootstrap(self):
        """Run 'puppet apply' to bootstrap the installer node."""
        logFileName = '/tmp/bootstrap.log'

        puppet_server = self._settings['fqdn']

        # Bootstrap using Puppet
        cmd = ('/opt/puppetlabs/bin/puppet apply --verbose'
               ' --detailed-exitcodes'
               ' --execute "class { \'tortuga::installer\':'
               ' puppet_server => \'%s\','
               '}"' % (puppet_server))

        retval = self._runCommandWithSpinner(
            cmd, '\nPerforming pre-configuration... Please wait...',
            logFileName=logFileName)

        if retval not in (0, 2):
            # Puppet can return a non-zero return code, even if it was
            # successful.
            self._logger.debug(
                'Puppet pre-configuration returned non-zero'
                ' return code [%d]' % (retval))

            errmsg = 'Puppet bootstrap failed (see log file %s)' % (
                logFileName)

            self._logger.error(errmsg)

            raise Exception(errmsg)

        self._logger.debug('Puppet pre-configuration completed')

    def initDatabase(self) -> Tuple[Any, Session]:
        """Create and prime the database; returns (DbManager, session)."""
        msg = _('Initializing database')

        self._logger.info(msg)

        print_('\n' + msg + '... ', end='')

        # This cannot be a global import since the database configuration
        # may be set in this script.
        from tortuga.db.dbManager import DbManager

        dbm = DbManager()

        # create database
        dbm.init_database()

        session = dbm.openSession()

        # Prime the database previously created as part of the bootstrap
        try:
            dbUtility.primeDb(session, self._settings)

            dbUtility.init_global_parameters(session, self._settings)

            print_(_('done'))

            session.commit()
        except Exception as exc:  # pylint: disable=broad-except
            session.rollback()

            print_(_('failed.'))

            print_(_('Exception raised initializing database:') +
                   ' {0}'.format(exc), file=sys.stderr)

        self._logger.debug('Done initializing database')

        return dbm, session

    def installKits(self, dbm):
        """Install all bundled kit packages, honoring 'skip_kits'."""
        self._logger.info('Installing kits')

        self.out('\n' + _('Installing kits') + '...\n')

        kitApi = KitApi()

        # Iterate over the glob of 'kits-*.tar.bz2'
        kitFileGlob = '%s/kits/kit-*.tar.bz2' % (self._cm.getRoot())

        # Split comma-separated list of kits to skip installing. Sorry, you
        # cannot skip installing the base kit.
        val = self._settings['skip_kits'] \
            if 'skip_kits' in self._settings else ''

        skip_kits = set([
            item for item in val.split(',') if item != 'base']) \
            if val else set()

        for kitPackage in glob.glob(kitFileGlob):
            try:
                kit = get_metadata_from_archive(kitPackage)
            except KitNotFound:
                msg = 'Kit [%s] is malformed/invalid. Skipping.' % (
                    os.path.basename(kitPackage))

                self._logger.error(msg)

                self.out(' %s\n' % (msg))

                continue

            if kit['name'] in skip_kits:
                msg = 'Kit [%s] installation skipped.' % (kit['name'])

                self.out(' %s\n' % (msg))

                self._logger.info(msg)

                continue

            try:
                kitApi.installKitPackage(dbm, kitPackage)
            except EulaAcceptanceRequired:
                msg = 'Kit [%s] requires EULA acceptance. Skipping.' % (
                    kitPackage)

                self.out(' %s\n' % (msg))

                self._logger.info(msg)

                continue

            self.out(' - %s installed.\n' % (kit['name']))

            self._logger.info('Kit [%s] installed' % (kit['name']))

        self._logger.info('Done installing kits')

        load_kits()

    def enableComponents(self, session: Session):
        """
        Enable the 'installer' component from the base kit on the
        installer node's software profile.

        Raises:
            ConfigurationError
        """
        self._logger.info('Enabling \'installer\' component')

        base_kit = KitApi().getKit(session, 'base')

        enabledComponents = ['installer']

        # get list of components from 'base' kit
        components = [c for c in base_kit.getComponentList()
                      if c.getName() in enabledComponents]

        installerNode = NodeApi().getInstallerNode(session)

        for component in components:
            SoftwareProfileApi().enableComponent(
                session,
                installerNode.getSoftwareProfile().getName(),
                base_kit.getName(),
                base_kit.getVersion(),
                base_kit.getIteration(),
                component.getName(),
                compVersion=component.getVersion(),
            )

    def promptForAdminCredentials(self):
        """Prompt for (or default) the admin username/password pair."""
        # Get admin username and password for use with web service
        if self._settings['defaults']:
            self.out(_('\nUsing default Tortuga admin user name/password.\n'))

            return 'admin', 'password'

        username = password = None

        # Administrator username
        while True:
            username = self.prompt(
                'admin', 'admin',
                ['Enter name for Tortuga admin user.',
                 'This user is not associated with any system user.'],
                'Admin user name')

            if len(username) > 3:
                break

            self.out('Admin user name must be at least 4 characters.')

        # Administrator password
        while True:
            password = self.prompt(
                '', 'password',
                ['Enter password for Tortuga admin user.'],
                'Admin password', None, None, True)

            if len(password) < 4:
                self.out('Admin password must be at least 4 characters.')

                continue

            confirmPassword = self.prompt(
                '', 'password',
                ['Confirm admin password.'],
                'Confirm password', None, None, True)

            if confirmPassword == password:
                self.out('\n')

                break

            self.out('Passwords did not match.')

        return username, password

    def createAdminUser(self, session: Session, username, password):
        """Create the Tortuga admin account in the database."""
        msg = _('Adding administrative user')

        self._logger.info(msg)

        self.out('\n' + msg + '... ')

        AdminApi().addAdmin(
            session, username, password, False,
            description='Added by tortuga-setup')

        self.out(_('done.') + '\n')
def main(): cm = ConfigManager() p = argparse.ArgumentParser() p.add_argument('-f', '--force', dest='force', action='store_true', default=False) p.add_argument('name', help='Software profile name') p.add_argument('kit', help='Kit descriptor (NAME-VERSION-ITERATION)') p.add_argument('component', help='Component descriptor (NAME-VERSION)') args = p.parse_args() kitNameAndVersion, kitIteration = args.kit.rsplit('-', 1) kitName, kitVersion = kitNameAndVersion.split('-', 1) compName, _ = args.component.split('-', 2) flagFile = os.path.join( cm.getRoot(), 'var/run/actions/%s/component_%s_%s_post_install' % (args.name, args.kit, args.component)) if os.path.exists(flagFile): if not args.force: sys.stderr.write( 'post-install component action for [%s] already run\n' % (compName)) sys.exit(0) # Remove the existing flag file, we're forcing a run os.unlink(flagFile) load_kits() kit_spec = (kitName, kitVersion, kitIteration) try: with DbManager().session() as session: kit_installer = get_kit_installer(kit_spec)() kit_installer.session = session c = kit_installer.get_component_installer(compName) if c is None: raise ComponentNotFound( 'Component [%s] not found in kit [%s]' % (compName, kitName)) c.run_action('post_install') logger.debug( 'post_install component action run for [%s] from kit [%s]' % (args.component, args.kit)) # Ensure destination directory exists if not os.path.exists(os.path.dirname(flagFile)): os.makedirs(os.path.dirname(flagFile)) # touch flagFile open(flagFile, 'w').close() except Exception as exc: # noqa pylint: disable=broad-except print('Error: {}'.format(exc), file=sys.stderr) sys.exit(0)
class SyncManager(TortugaObjectManager, Singleton):
    """Singleton class for cluster sync management"""

    # Singleton.
    __instanceLock = threading.RLock()

    # update delay increase (seconds)
    CLUSTER_UPDATE_DELAY_INCREASE = 30

    # after this limit is reached, warning will be logged
    CLUSTER_UPDATE_WARNING_LIMIT = 10

    def __init__(self):
        super(SyncManager, self).__init__()

        # True while an update is scheduled but not yet picked up by the
        # timer thread; guarded by __instanceLock
        self._isUpdateScheduled = False
        # True while the timer thread is executing updates
        self._isUpdateRunning = False

        self._sudoCmd = \
            osUtility.getOsObjectFactory().getOsSysManager().getSudoCommand()

        self._cm = ConfigManager()

    def __runClusterUpdate(self):
        """ Run cluster update. """
        # Timer-thread body: loops while new updates keep being scheduled,
        # backing off by CLUSTER_UPDATE_DELAY_INCREASE each pass
        self.getLogger().debug('Update timer running')

        updateCmd = '%s %s' % (self._sudoCmd,
                               os.path.join(self._cm.getRoot(),
                                            'bin/run_cluster_update.sh'))

        delay = 0
        updateCnt = 0

        while self.__resetIsUpdateScheduled():
            self._isUpdateRunning = True

            self.getLogger().debug(
                'New cluster update delay: %s seconds' % (delay))

            time.sleep(delay)
            delay += SyncManager.CLUSTER_UPDATE_DELAY_INCREASE

            # Log warning if timer has been running for too many times.
            updateCnt += 1

            self.getLogger().debug('Cluster update timer count: %s' % (
                updateCnt))

            if updateCnt > SyncManager.CLUSTER_UPDATE_WARNING_LIMIT:
                # NOTE(review): Logger.warn is deprecated in the stdlib in
                # favor of warning() -- confirm getLogger() returns a stdlib
                # logger and switch.
                self.getLogger().warn(
                    'Cluster updated more than %s times using the same'
                    ' timer (possible configuration problem)' % (
                        SyncManager.CLUSTER_UPDATE_WARNING_LIMIT))

            self.getLogger().debug(
                'Starting cluster update using: %s' % (updateCmd))

            # Since we might sleep for a while, we need to
            # reset update flag just before we run update to avoid
            # unnecessary syncs.
            self.__resetIsUpdateScheduled()

            p = TortugaSubprocess(updateCmd)

            try:
                p.run()

                self.getLogger().debug('Cluster update successful')
            except CommandFailed:
                if p.getExitStatus() == tortugaStatus.\
                        TORTUGA_ANOTHER_INSTANCE_OWNS_LOCK_ERROR:
                    # Another process holds the cfmsync lock; hand off to a
                    # freshly scheduled timer and stop this one
                    self.getLogger().debug(
                        'Another cluster update is already running, will'
                        ' try to reschedule it')

                    self._isUpdateRunning = False

                    self.scheduleClusterUpdate(
                        updateReason='another update already running',
                        delay=60)

                    break
                else:
                    self.getLogger().error(
                        'Update command "%s" failed (exit status: %s):'
                        ' %s' % (updateCmd, p.getExitStatus(), p.getStdErr()))

            self.getLogger().debug('Done with cluster update')

        self._isUpdateRunning = False

        self.getLogger().debug('Update timer exiting')

    def __resetIsUpdateScheduled(self):
        """ Reset cluster update flag, return old flag value. """
        SyncManager.__instanceLock.acquire()
        try:
            flag = self._isUpdateScheduled
            self._isUpdateScheduled = False
            return flag
        finally:
            SyncManager.__instanceLock.release()

    def scheduleClusterUpdate(self, updateReason=None, delay=5):
        """ Schedule cluster update. """
        SyncManager.__instanceLock.acquire()
        try:
            if self._isUpdateScheduled:
                # Already scheduled.
                return

            # Start update timer if needed.
            self._isUpdateScheduled = True

            if not self._isUpdateRunning:
                self.getLogger().debug(
                    'Scheduling cluster update in %s seconds,'
                    ' reason: %s' % (delay, updateReason))

                t = threading.Timer(delay, self.__runClusterUpdate)

                t.start()
            else:
                # The running timer thread will observe the flag via
                # __resetIsUpdateScheduled() and loop again
                self.getLogger().debug(
                    'Will not schedule new update timer while the old'
                    ' timer is running')
        finally:
            SyncManager.__instanceLock.release()

    def getUpdateStatus(self):  # pylint: disable=no-self-use
        """ Check cluster update flag. """
        return RunManager().checkLock('cfmsync')
class OSSupport(OsSupportBase):
    """
    RHEL-family OS support: builds PXE reinstall snippets and Anaconda
    Kickstart files for node provisioning.
    """

    def __init__(self, osFamilyInfo):
        super(OSSupport, self).__init__(osFamilyInfo)

        self._cm = ConfigManager()
        self._globalParameterDbApi = GlobalParameterDbApi()

        try:
            depot_dir = \
                self._globalParameterDbApi.getParameter('depot').getValue()
        except ParameterNotFound:
            # Fallback to legacy default
            depot_dir = '/depot'

        self._cm.setDepotDir(depot_dir)

    def getPXEReinstallSnippet(self, ksurl, node, hardwareprofile=None,
                               softwareprofile=None):
        """
        Return the PXE config fragment ("kernel"/"append" lines) used to
        reinstall *node* from the Kickstart file at *ksurl*.

        :param ksurl: URL of the generated Kickstart file
        :param node: node object; must have a bootable NIC
        :param hardwareprofile: optional override (defaults to node's)
        :param softwareprofile: optional override (defaults to node's)

        Raises:
            NicNotFound
        """
        # Find the first nic marked as bootable
        boot_nics = [nic for nic in node.nics if nic.boot]
        if not boot_nics:
            raise NicNotFound(
                'Node [%s] does not have a bootable NIC' % (node.name))

        # Choose the first one
        nic = boot_nics[0]

        if hardwareprofile is None:
            hardwareprofile = node.hardwareprofile

        if softwareprofile is None:
            softwareprofile = node.softwareprofile

        # Use settings from software profile, if defined, otherwise use
        # settings from hardware profile.
        bootParams = getBootParameters(hardwareprofile, softwareprofile)

        kernel = bootParams['kernel']
        kernelParams = bootParams['kernelParams']
        initrd = bootParams['initrd']

        bootargs = []

        if softwareprofile.os.family.version == '7':
            # RHEL 7.x uses the "inst.ks=" boot option
            bootargs.append('inst.ks=%s' % (ksurl))
        else:
            # RHEL 5.x and 6.x
            bootargs.append('ks=%s' % (ksurl))
            bootargs.append('ksdevice=%s' % (nic.networkdevice.name))

        # Append kernel parameters, if defined.
        if kernelParams:
            bootargs.append(kernelParams)

        result = '''\
kernel %s
append initrd=%s %s''' % (kernel, initrd, ' '.join(bootargs))

        return result

    def __get_kickstart_network_entry(self, dbNode, hardwareprofile, nic):
        """
        Build a single Kickstart 'network' directive for *nic*.

        Returns None when the interface should not be configured at
        install time (public static network with no IP assigned).
        """
        bProvisioningNic = nic.network == hardwareprofile.nics[0].network

        installer_private_ip = hardwareprofile.nics[0].ip

        if not bProvisioningNic and not nic.network.usingDhcp and not nic.ip:
            # Unconfigured public static IP network
            return None

        bActivate = False

        # By default, all interfaces are enabled at on boot
        bOnBoot = True

        # Use the network device name, as specified in the hardware profile
        netargs = [
            'network --device %s' % (nic.networkdevice.name)
        ]

        if bProvisioningNic:
            # Provisioning interfaces are always configured statically,
            # with the installer acting as nameserver.  (The original
            # conditional expression here always evaluated to 'static'
            # inside this branch.)
            netargs.append('--bootproto static')
            netargs.append('--ip=%s' % (nic.ip))
            netargs.append('--netmask=%s' % (nic.network.netmask))
            netargs.append('--nameserver=%s' % (installer_private_ip))

            bActivate = True
        else:
            if nic.network and nic.network.usingDhcp:
                netargs.append('--bootproto dhcp')
            else:
                netargs.append('--bootproto static')

                if nic.ip:
                    netargs.append('--ip=%s' % (nic.ip))
                    netargs.append('--netmask=%s' % (nic.network.netmask))
                else:
                    # Do not enable interface if it's not configured
                    netargs.append('--onboot=no')
                    bOnBoot = False

        # Ensure all interfaces are activated
        if bActivate:
            netargs.append('--activate')

        bDefaultRoute = True

        if bProvisioningNic:
            # This is the nic connected to the provisioning network.
            if len(dbNode.nics) > 1:
                # Disable the default route on the management network.
                netargs.append('--nodefroute')
                bDefaultRoute = False
        else:
            # Disable DNS for all interfaces other than the
            # provisioning network
            if bOnBoot:
                netargs.append('--nodns')

        if nic.network.gateway and bDefaultRoute:
            netargs.append('--gateway %s' % (nic.network.gateway))

        return ' '.join(netargs)

    def __validate_node(self, node):
        """
        Basic sanity check prior to Kickstart generation.

        Raises:
            NodeNotFound
            NicNotFound
        """
        if not node.name:
            raise NodeNotFound('Node must have a name')

        if not node.nics:
            raise NicNotFound('Node [%s] has no associated nics' % (
                node.name))

    def __kickstart_get_timezone(self):
        """Return the configured timezone with spaces made safe."""
        tz = self._globalParameterDbApi.getParameter(
            'Timezone_zone').getValue()

        # Ensure timezone does not contain any spaces
        return tz.replace(' ', '_')

    def __kickstart_get_network_section(self, node, hardwareprofile):
        """Build the 'network' section of the Kickstart file."""
        # Ensure nics are processed in order (ie. eth0, eth1, eth2...).
        # Use sorted() so the node's nic collection itself is not
        # reordered in place as a side effect.
        nics = sorted(node.nics, key=lambda nic: nic.networkdevice.name)

        network_entries = []
        hostname_set = False

        # Iterate over nics, adding 'network' Kickstart entries for each
        for nic in nics:
            networkString = self.__get_kickstart_network_entry(
                node, hardwareprofile, nic)

            if not networkString:
                continue

            # Attach the hostname to the bootable provisioning interface
            # exactly once.
            if not hostname_set and nic.boot and \
                    nic.network.type == 'provision':
                networkString += ' --hostname=%s' % (node.name)
                hostname_set = True

            network_entries.append(networkString)

        return '\n'.join(network_entries)

    def __kickstart_get_repos(self, dbSwProfile, installer_private_ip):
        """
        Return 'repo' Kickstart directives for the base kit (when its
        repository exists on the depot) and the third-party repository.

        :param installer_private_ip: host (ip or fqdn) used to build the
            repository base URLs
        """
        repo_entries = []

        for dbComponent in dbSwProfile.components:
            dbKit = dbComponent.kit
            if dbKit.isOs or dbKit.name != 'base':
                # Do not add repos for OS kits or non-base kits
                continue

            kitVer = '%s-%s' % (dbKit.version, dbKit.iteration)
            kitArch = 'noarch'

            subpath = '%s/%s/%s' % (dbKit.name, kitVer, kitArch)

            # Check if repository actually exists
            if not os.path.exists(os.path.join(self._cm.getDepotDir(),
                                               'kits',
                                               subpath,
                                               'repodata',
                                               'repomd.xml')):
                # Repository for specified kit is empty. Nothing to do...
                continue

            url = self._cm.getYumRootUrl(installer_private_ip) + \
                '/' + subpath

            repo_entries.append(
                'repo --name %s --baseurl=%s' % (dbKit.name, url))

        subpath = '3rdparty/%s/%s/%s' % (dbSwProfile.os.family.name,
                                         dbSwProfile.os.family.version,
                                         dbSwProfile.os.arch)

        if os.path.exists(os.path.join(self._cm.getRoot(),
                                       'repos', subpath,
                                       'repodata/repomd.xml')):
            # Third-party repository contains packages, include it in
            # Kickstart
            url = '%s/%s' % (
                self._cm.getYumRootUrl(installer_private_ip), subpath)

            repo_entries.append(
                'repo --name tortuga-third-party --baseurl=%s' % (url))

        return repo_entries

    def __get_kickstart_template(self, swprofile):
        """
        Resolve the Kickstart template path: OS-family-specific first,
        then software-profile-specific, then the generic fallback.
        """
        # Bug fix: the template names must be built from plain str
        # values.  The previous code interpolated
        # ``name.encode('ascii')`` (bytes), which under Python 3 yields
        # names like "kickstart-b'rhel'.tmpl", so the specific templates
        # could never be found.
        ksTemplate = os.path.join(
            self._cm.getKitConfigBase(),
            'kickstart-%s.tmpl' % (swprofile.os.family.name))

        if not os.path.exists(ksTemplate):
            ksTemplate = os.path.join(
                self._cm.getKitConfigBase(),
                'kickstart-%s.tmpl' % (swprofile.name))

        if not os.path.exists(ksTemplate):
            ksTemplate = os.path.join(
                self._cm.getKitConfigBase(), 'kickstart.tmpl')

        return ksTemplate

    def __kickstart_get_partition_section(self, softwareprofile):
        """
        Build the %pre script that writes /tmp/partinfo (zerombr,
        clearpart, part and bootloader directives) based on the software
        profile's partition definitions.
        """
        buf = """\
#!/bin/sh
# Determine how many drives we have
"""

        # Temporary workaround for RHEL 5.7 based distros
        # https://bugzilla.redhat.com/show_bug.cgi?format=multiple&id=709880
        if softwareprofile.os.version == '5.7':
            buf += 'set $(PYTHONPATH=/usr/lib/booty list-harddrives)\n'
        else:
            buf += 'set $(list-harddrives)\n'

        buf += """
d1=$1
d2=$3
d3=$5
d4=$7
"""

        clearpartstr = '''
cat >/tmp/partinfo << __PARTINFO__
zerombr
'''

        disksToPreserve = []

        # Need to get the drives to clear
        clearpartstr += 'clearpart '
        driveNumbers = []

        for dbPartition in softwareprofile.partitions:
            disk = dbPartition.device.split('.')[0]

            if disk not in driveNumbers:
                driveNumbers.append(disk)

                if not dbPartition.preserve:
                    # This is a partition to clear
                    if len(driveNumbers) == 1:
                        # First drive
                        clearpartstr += ('--all --initlabel'
                                         ' --drives="${d%s:-nodisk}' % (
                                             disk))
                    else:
                        clearpartstr += ',${d%s:-nodisk}' % (disk)
                else:
                    disksToPreserve.append(disk)

        clearpartstr += "--none" if not driveNumbers else '"'
        clearpartstr += '\n'

        # Zero the MBR of every drive that is not being preserved
        for diskNum in driveNumbers:
            if diskNum in disksToPreserve:
                continue

            buf += '''
dd if=/dev/zero of=$d%s bs=512 count=1
''' % (diskNum)

        buf += clearpartstr

        bootloaderLocation = "mbr"

        # Now create partitions
        for dbPartition in softwareprofile.partitions:
            if dbPartition.bootLoader:
                # Can't control the partition in anaconda...it will be on
                # the drive with the boot partition
                bootloaderLocation = 'partition'

            buf += self._processPartition(dbPartition)

        # now do the bootloader
        buf += (
            'bootloader --location=%s --driveorder=${d1:-nodisk}\n' % (
                bootloaderLocation))

        buf += '__PARTINFO__\n'

        return buf

    def __get_template_subst_dict(self, node, hardwareprofile,
                                  softwareprofile):
        """
        Assemble the substitution dictionary handed to the Kickstart
        template (installer identity, network config, repos, partition
        %pre script, etc.).
        """
        hardwareprofile = hardwareprofile \
            if hardwareprofile else node.hardwareprofile
        softwareprofile = softwareprofile \
            if softwareprofile else node.softwareprofile

        installer_public_fqdn = socket.getfqdn()
        installer_hostname = installer_public_fqdn.split('.')[0]

        installer_private_ip = hardwareprofile.nics[0].ip

        try:
            private_domain = self._globalParameterDbApi.\
                getParameter('DNSZone').getValue()
        except ParameterNotFound:
            private_domain = None

        installer_private_fqdn = '%s%s%s' % (
            installer_hostname,
            get_installer_hostname_suffix(
                hardwareprofile.nics[0], enable_interface_aliases=None),
            '.%s' % (private_domain) if private_domain else '')

        vals = node.name.split('.', 1)
        domain = vals[1].lower() if len(vals) == 2 else ''

        d = {
            'fqdn': node.name,
            'domain': domain,
            'hostname': installer_hostname,
            'installer_private_fqdn': installer_private_fqdn,
            'installer_private_domain': private_domain,
            'installer_private_ip': installer_private_ip,
            'puppet_master_fqdn': installer_public_fqdn,
            'installer_public_fqdn': installer_public_fqdn,
            'ntpserver': installer_private_ip,
            'os': softwareprofile.os.name,
            'osfamily': softwareprofile.os.family.name,
            'osfamilyvers': int(softwareprofile.os.family.version),
            # These are deprecated and included for backwards
            # compatibility only. Do not reference them in any new
            # kickstart templates.
            'primaryinstaller': installer_private_fqdn,
            'puppetserver': installer_public_fqdn,
            'installerip': installer_private_ip,
        }

        # Add entry for install package source
        d['url'] = '%s/%s/%s/%s' % (
            self._cm.getYumRootUrl(installer_private_fqdn),
            softwareprofile.os.name, softwareprofile.os.version,
            softwareprofile.os.arch)

        d['lang'] = 'en_US.UTF-8'
        d['keyboard'] = 'us'
        d['networkcfg'] = self.__kickstart_get_network_section(
            node, hardwareprofile)
        d['rootpw'] = self._generatePassword()
        d['timezone'] = self.__kickstart_get_timezone()
        d['includes'] = '%include /tmp/partinfo'
        d['repos'] = '\n'.join(
            self.__kickstart_get_repos(
                softwareprofile, installer_private_fqdn))

        # Retain this for backwards compatibility with legacy Kickstart
        # templates
        d['packages'] = '\n'.join([])

        d['prescript'] = self.__kickstart_get_partition_section(
            softwareprofile)

        d['installer_url'] = self._cm.getInstallerUrl(installer_private_fqdn)

        d['cfmstring'] = self._cm.getCfmPassword()

        return d

    def getKickstartFileContents(self, node, hardwareprofile,
                                 softwareprofile):
        """
        Render and return the complete Kickstart file for *node*.

        Raises:
            NodeNotFound
            NicNotFound
        """
        # Perform basic sanity checking before proceeding
        self.__validate_node(node)

        template_subst_dict = self.__get_template_subst_dict(
            node, hardwareprofile, softwareprofile)

        with open(self.__get_kickstart_template(softwareprofile)) as fp:
            tmpl = fp.read()

        return Template(tmpl).render(template_subst_dict)

    def _generatePassword(self):  # pylint: disable=no-self-use
        """
        Generate a random password, used when creating a Kickstart file
        for package-based node provisioning.

        NOTE(review): random.choice and a time-derived crypt salt are not
        cryptographically strong; consider the ``secrets`` module if this
        password must resist guessing — confirm before changing behavior.
        """
        strlength = 8
        strchars = string.ascii_letters + string.digits

        rootpw = ''.join([choice(strchars) for _ in range(strlength)])

        rootpw = crypt.crypt(str(rootpw), str(time.time()))

        return rootpw

    def __get_partition_mountpoint(self, dbPartition):
        """
        Return the effective mount point for a partition, or None when
        the partition has no mount point and is not swap (such
        partitions are ignored).
        """
        if not dbPartition.mountPoint:
            if dbPartition.fsType == 'swap':
                mountPoint = 'swap'
            else:
                # Any partition that does not have a mountpoint defined
                # is ignored.
                return None
        else:
            mountPoint = dbPartition.mountPoint

        return mountPoint

    def _processPartition(self, dbPartition):
        """
        Return a single Kickstart 'part' directive (with trailing
        newline) for *dbPartition*, or '' if it has no usable mount
        point.
        """
        mountPoint = dbPartition.mountPoint \
            if dbPartition.mountPoint else \
            self.__get_partition_mountpoint(dbPartition)

        if not mountPoint:
            return ''

        # All partitions must have a mount point and partition type
        result = 'part %s --fstype %s' % (mountPoint, dbPartition.fsType)

        # This will throw an exception if the size stored in the
        # partition settings is not an integer.
        if dbPartition.size:
            result += ' --size=%d' % (dbPartition.size)
        else:
            # If partition size is not set or is zero, use
            # '--recommended' flag for swap partitions only
            if mountPoint == 'swap':
                result += ' --recommended'

        disk, part = dbPartition.device.split('.')

        optionsList = dbPartition.options.split(',') \
            if dbPartition.options else []

        if dbPartition.grow is not None:
            result += ' --grow'

        if dbPartition.maxSize is not None:
            result += ' --maxsize %d' % (dbPartition.maxSize)

        if optionsList:
            # Add the fs options...
            result += ' --fsoptions="%s"' % (','.join(optionsList))

        # Preserved partitions are reused in place; others are created
        # on the (shell-resolved) disk device.
        result += ' --noformat --onpart=${d%s:-nodisk}%s' % (disk, part) \
            if dbPartition.preserve else \
            ' --ondisk=${d%s:-nodisk}' % str(disk)

        result += '\n'

        return result
class TortugaCli(metaclass=ABCMeta):
    """
    Base tortuga command line interface class.

    Subclasses implement runCommand(); run() provides uniform exception
    handling, and parseArgs() wires up the common Tortuga options.
    """

    def __init__(self, validArgCount=0):
        self._logger = logging.getLogger(CLI_NAMESPACE)

        self._config: TortugaScriptConfig = None
        self._parser = argparse.ArgumentParser()
        self._args = []
        self._validArgCount = validArgCount
        self._optionGroupDict = {}
        self._cm = ConfigManager()

        self.__initializeLocale()

    def __initializeLocale(self):
        """Initialize the gettext domain """
        langdomain = 'tortugaStrings'

        # Locate the Internationalization stuff
        localedir = '../share/locale' \
            if os.path.exists('../share/locale') else \
            os.path.join(self._cm.getRoot(), 'share/locale')

        gettext.install(langdomain, localedir)

    def getParser(self):
        """ Get parser for this class. """
        return self._parser

    def addOption(self, *args, **kwargs):
        """ Add option. """
        self._parser.add_argument(*args, **kwargs)

    def addOptionToGroup(self, groupName, *args, **kwargs):
        """
        Add option for the given group name.
        Group should be created using addOptionGroup().
        """
        group = self._optionGroupDict.get(groupName)
        group.add_argument(*args, **kwargs)

    def addOptionGroup(self, groupName, desc):
        """ Add option group. """
        group = self._parser.add_argument_group(groupName, desc)
        self._optionGroupDict[groupName] = group
        return group

    def parseArgs(self, usage=None):
        """
        Parse args

        Raises:
            InvalidArgument
        """
        common_group = 'Common Tortuga Options'
        self.addOptionGroup(common_group, None)

        self.addOptionToGroup(common_group, '-V', action='store_true',
                              dest='cmdVersion', default=False,
                              help='print version and exit')

        self.addOptionToGroup(common_group, '-d', '--debug',
                              dest='consoleLogLevel', default='warning',
                              help='set debug level; valid values are: '
                                   'critical, error, warning, info, debug')

        self.addOptionToGroup(common_group, '--config',
                              dest='config',
                              help='Path to config file '
                                   '(defaults to ~/.tortuga/config)')

        self.addOptionToGroup(common_group, '--url',
                              help='Tortuga web service URL')

        self.addOptionToGroup(common_group, '--username', dest='username',
                              help='Tortuga web service user name')

        self.addOptionToGroup(common_group, '--password', dest='password',
                              help='Tortuga web service password')

        self.addOptionToGroup(common_group, '--token', dest='token',
                              help='Tortuga web service token')

        self.addOptionToGroup(common_group, '--no-verify', dest='verify',
                              action='store_false', default=True,
                              help="Don't verify the API SSL certificate")

        if usage:
            self._parser.description = usage

        try:
            self._args = self._parser.parse_args()
        except SystemExit as rc:
            # Flush output so argparse's message is visible before exiting
            # with argparse's own status code.
            sys.stdout.flush()
            sys.stderr.flush()
            sys.exit(int(str(rc)))

        if self._args.cmdVersion:
            print('{0} version: {1}'.format(os.path.basename(sys.argv[0]),
                                            self._cm.getTortugaRelease()))
            sys.exit(0)

        self._setup_logging(self._args.consoleLogLevel)
        self._load_config(self._args)

        return self._args

    def _setup_logging(self, log_level_name: str):
        """
        Setup logging for the specified log level.

        :param str log_level_name: the name of the log level to use
        """
        log_level_name = log_level_name.upper()
        if log_level_name not in [
                'CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']:
            print('Invalid debug level: {}'.format(log_level_name))
            sys.exit(0)

        log_level = getattr(logging, log_level_name)

        logger = logging.getLogger(ROOT_NAMESPACE)
        logger.setLevel(log_level)

        ch = logging.StreamHandler()
        ch.setLevel(log_level)

        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)

        logger.addHandler(ch)

    def _load_config(self, args: argparse.Namespace):
        """
        Implements the --config argument.
        """
        #
        # Load a config, filename may or may-not be provided...
        #
        try:
            self._config = TortugaScriptConfig.load(args.config)
        except ConfigException as ex:
            print(str(ex))
            sys.exit(0)

        #
        # Override the config with any provided argument values
        #
        if args.url:
            self._config.url = args.url
        if args.username:
            self._config.username = args.username
        if args.password:
            self._config.password = args.password
        if args.token:
            self._config.token = args.token
        self._config.verify = args.verify

    def usage(self, s=None):
        """
        Print usage information
        """
        if s:
            # Bug fix: the newline must be inside the write() call.  The
            # previous code appended ' \n' to write()'s integer return
            # value, raising TypeError whenever a message was supplied.
            sys.stderr.write('Error: {0}\n'.format(s))

        self._parser.print_help()

        sys.exit(1)

    def getArgs(self):
        """Return the parsed argparse.Namespace."""
        return self._args

    def configureClient(self, client_class: Generic[T]) -> T:
        """
        Instantiate *client_class* using the credentials configured via
        CLI arguments / config file (token or username+password).
        """
        auth_method = self._config.get_auth_method()

        if auth_method == self._config.AUTH_METHOD_TOKEN:
            return client_class(token=self._config.get_token(),
                                baseurl=self._config.url,
                                verify=self._config.verify)

        elif auth_method == self._config.AUTH_METHOD_PASSWORD:
            return client_class(username=self._config.username,
                                password=self._config.password,
                                baseurl=self._config.url,
                                verify=self._config.verify)

        raise Exception('Unsupported auth method: {}'.format(auth_method))

    @abstractmethod
    def runCommand(self):  # pylint: disable=no-self-use
        """
        This method must be implemented by the derived class.
        """

    def run(self):
        """
        Invoke runCommand() in derivative class and handle exceptions.
        """
        try:
            self.runCommand()
        except TortugaException as ex:
            print(ex.getErrorMessage())
            raise SystemExit(ex.getErrorCode())
        except SystemExit:
            raise
        except Exception as ex:
            print(str(ex))
            raise SystemExit(-1)

    def _parseDiskSize(self, diskSizeParam):  # pylint: disable=no-self-use
        """
        Parses diskSizeParam, returns an int value representing
        number of megabytes

        Raises:
            ValueError
        """
        if diskSizeParam.endswith('TB'):
            return int(float(diskSizeParam[:-2]) * 1000000)

        if diskSizeParam.endswith('GB'):
            return int(float(diskSizeParam[:-2]) * 1000)
        elif diskSizeParam.endswith('MB'):
            # Must be an integer
            return int(diskSizeParam[:-2])

        return int(diskSizeParam)

    def _getDiskSizeDisplayStr(self, volSize):  # pylint: disable=no-self-use
        """Format a size in MB as a human-readable MB/GB/TB string."""
        if volSize < 1000:
            result = '%s MB' % (volSize)
        elif volSize < 1000000:
            result = '%.3f GB' % (float(volSize) / 1000)
        else:
            result = '%.3f TB' % (float(volSize) / 1000000)

        return result
class KitActions(ActionsBase):
    '''
    A kit is a group of components that constitute a complete application.
    '''

    def __init__(self, moduledir=None):
        '''
        Arguments:
            moduledir   Path to the module's directory. Defaults to CWD.
                        E.g: "/opt/tortuga/kits/kit-ganglia-1.2.3"

        Attributes:
            name        Kit name.
            version     Kit version.
            moduledir   Fully-qualified path to the root of the kit as
                        installed on the filesystem. For example,
                        "/opt/tortuga/kits/kit-ganglia-1.2.3"
            components  a list of ComponentActions() objects
            _logger     A logger instance for creating log messages
            _config     configManager instance
            _root       $TORTUGA_ROOT; e.g: "/opt/tortuga"
        '''
        super(KitActions, self).__init__()

        # Kit name defaults to the lowercased subclass name.
        self.name = self.__class__.__name__.lower()
        self.version = None

        if moduledir:
            self.moduledir = moduledir
        else:
            self.moduledir = os.getcwd()

        self.components = []

        # Most kits need these things; including here for convenience.
        self._config = ConfigManager()
        self._root = self._config.getRoot()

    @property
    def config(self):
        """The shared ConfigManager instance."""
        return self._config

    def getLogger(self):
        """Return the kit's logger instance."""
        return self._logger

    def getConfigManager(self):
        """Return the ConfigManager instance."""
        return self._config

    def getRoot(self):
        """Return $TORTUGA_ROOT (e.g. "/opt/tortuga")."""
        return self._root

    # Overridden form ActionsBase
    def getConfigFile(self):
        """Path of this kit's configuration file."""
        return "%s/%s-kit.conf" % (self.getConfigBase(), self.name.lower())

    def getConfigBase(self):
        """Directory holding this kit's configuration."""
        return "%s/%s" % (self.getConfigManager().getKitConfigBase(),
                          self.name.lower())

    def pre_install(self):
        '''
        Pre-installation kit hook.
        '''
        pass

    def post_install(self):
        '''
        Post-installation kit hook.
        '''
        pass

    def pre_uninstall(self):
        '''
        Pre-uninstallation kit hook.
        '''
        pass

    def post_uninstall(self):
        '''
        Post-uninstallation kit hook.
        '''
        pass

    def add_component(self, component):
        '''Add the given component to the kit's list of components'''
        # Point the component to its parent
        component.kit = self

        self.components.append(component)

    def lookup_cname(self, cname):
        '''
        Return the ComponentActions object from the KitActions whose
        name is "cname"

        Raises:
            ComponentNotFound
        '''
        for c in self.components:
            if c.__component_name__ == cname:
                # Components are stored as classes; instantiate with this
                # kit as parent.
                return c(self)

        raise ComponentNotFound("Can't find component [%s] in kit [%s]" % (
            cname, self.__class__.__name__))

    def is_puppet_module_installed(self, name):
        """
        Check whether the named Puppet module is installed.

        Returns True/False, or None when the 'puppet module list'
        command itself exits non-zero.
        """
        cmd = '/opt/puppetlabs/bin/puppet module list --render-as=json'

        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # NOTE(review): stdout is parsed before wait(); assumes the JSON
        # fits in the pipe buffer without deadlocking — confirm.
        puppet_module_list = json.load(p.stdout)

        retval = p.wait()
        if retval != 0:
            return None

        for modules in \
                puppet_module_list['modules_by_path'].values():
            for module in modules:
                if module.startswith('Module %s(' % (name)):
                    return True

        return False

    def installPuppetModule(self, modulePath):
        """
        Install "standard" Puppet module using "puppet module install
        --force"

        Raises:
            ConfigurationError
        """
        if not os.path.exists(modulePath):
            errmsg = ('Error: unable to install puppet module [%s].'
                      ' Module does not exist' % (modulePath))

            self.getLogger().error(errmsg)

            raise ConfigurationError(errmsg)

        cmd = ('/opt/puppetlabs/bin/puppet module install --color false'
               ' --force %s' % (modulePath))

        tortugaSubprocess.executeCommand(cmd)

    def uninstallPuppetModule(self, moduleName):
        """Uninstall a Puppet module; failures are ignored."""
        cmd = ('/opt/puppetlabs/bin/puppet module uninstall'
               ' --color false --ignore-changes %s' % (moduleName))

        tortugaSubprocess.executeCommandAndIgnoreFailure(cmd)

    def install_wheel_matching_filespec(self, whl_pathspec):
        """
        Install the first Python wheel matching *whl_pathspec* using the
        Tortuga-bundled pip.

        Raises:
            FileNotFound
        """
        # Find an whl matching the filespec
        whl_files = glob.glob(whl_pathspec)
        if not whl_files:
            raise FileNotFound('No files found matching spec %s' % (
                whl_pathspec))

        # Use the first whl file found
        cmd = '%s/pip install %s' % (self._config.getBinDir(), whl_files[0])

        tortugaSubprocess.executeCommandAndIgnoreFailure(cmd)

    def uninstall_wheel(self, wheel_name):
        """Uninstall a Python wheel by name; failures are ignored."""
        cmd = 'pip uninstall %s' % (wheel_name)

        tortugaSubprocess.executeCommandAndIgnoreFailure(cmd)
class TortugaCli(object):
    """
    Base tortuga command line interface class.

    Legacy optparse-based variant: subclasses override runCommand() and
    call parseArgs() to obtain (options, args).
    """

    def __init__(self, validArgCount=0):
        self._logger = logging.getLogger('tortuga.cli.%s' % (
            self.__class__.__name__))
        self._logger.addHandler(logging.NullHandler())

        # add_help_option disabled so '-h'/'-?' can be registered manually
        # in the common options group below.
        self._parser = OptionParser(add_help_option=False)
        self._options = None
        self._args = []
        self._validArgCount = validArgCount
        self._username = None
        self._password = None
        self._optionGroupDict = {}
        self._cm = ConfigManager()

        self.__initializeLocale()

        commonGroup = _('Common Tortuga Options')
        self.addOptionGroup(commonGroup, None)

        self.addOptionToGroup(commonGroup, '-h', '--help', action='help',
                              help=_('show this help message and exit'))

        self.addOptionToGroup(commonGroup, '-?', '', action='help',
                              help=_('show this help message and exit'))

        self.addOptionToGroup(commonGroup, '-V', '', action='store_true',
                              dest='cmdVersion', default=False,
                              help=_('print version and exit'))

        self.addOptionToGroup(
            commonGroup, '-d', '--debug', dest='consoleLogLevel',
            help=_('set debug level; valid values are: critical, error,'
                   ' warning, info, debug'))

        self.addOptionToGroup(
            commonGroup, '--username', dest='username',
            help=_('Credential to use when not running as root on the'
                   ' installer.'))

        self.addOptionToGroup(
            commonGroup, '--password', dest='password',
            help=_('Credential to use when not running as root on the'
                   ' installer.'))

    def getLogger(self):
        """ Get logger for this class. """
        return self._logger

    def __initializeLocale(self):
        """Initialize the gettext domain """
        langdomain = 'tortugaStrings'

        # Locate the Internationalization stuff
        localedir = '../share/locale' \
            if os.path.exists('../share/locale') else \
            os.path.join(self._cm.getRoot(), 'share/locale')

        gettext.install(langdomain, localedir)

    def getParser(self):
        """ Get parser for this class. """
        return self._parser

    def addOption(self, *args, **kwargs):
        """ Add option. """
        self._parser.add_option(*args, **kwargs)

    def addOptionToGroup(self, groupName, *args, **kwargs):
        """
        Add option for the given group name.
        Group should be created using addOptionGroup().
        """
        group = self._optionGroupDict.get(groupName)
        group.add_option(*args, **kwargs)

    def addOptionGroup(self, groupName, desc):
        """ Add option group. """
        group = OptionGroup(self._parser, groupName, desc)
        self._parser.add_option_group(group)
        self._optionGroupDict[groupName] = group

    def parseArgs(self, usage=None):
        """
        Parse args

        Returns the (options, args) tuple from optparse.

        Raises:
            InvalidArgument
        """
        if usage:
            self._parser.usage = usage

        try:
            self._options, self._args = self._parser.parse_args()
        except SystemExit as rc:
            # Flush output so optparse's message is visible, then exit
            # with optparse's own status code.
            sys.stdout.flush()
            sys.stderr.flush()
            sys.exit(int(str(rc)))

        if self._validArgCount < len(self._args):
            # Positional args are not enabled and we have some
            msg = _("Invalid Argument(s):")
            for arg in self._args[self._validArgCount:]:
                msg += " " + arg

            raise InvalidArgument(msg)

        optDict = self._options.__dict__
        if optDict.get('cmdVersion'):
            print(
                _('{0} version: {1}'.format(os.path.basename(sys.argv[0]),
                                            self._cm.getTortugaRelease())))
            sys.exit(0)

        # Log level.
        consoleLogLevel = optDict.get('consoleLogLevel', None)
        if consoleLogLevel:
            # logManager.setConsoleLogLevel(consoleLogLevel)

            # NOTE(review): any -d/--debug value enables full DEBUG
            # console logging; the specific level name is not applied
            # here — confirm whether that is intended.
            logger = logging.getLogger('tortuga')
            logger.setLevel(logging.DEBUG)

            # create console handler and set level to debug
            ch = logging.StreamHandler()
            ch.setLevel(logging.DEBUG)

            # create formatter
            formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

            # add formatter to ch
            ch.setFormatter(formatter)

            # add ch to logger
            logger.addHandler(ch)

        # Promote options to attributes
        self._username = self._options.username
        self._password = self._options.password

        return self._options, self._args

    def usage(self, s=None):
        '''Print the help provided by optparse'''
        if s:
            sys.stderr.write(_('Error: {0}').format(s) + '\n')

        self._parser.print_help()

        sys.exit(1)

    def getOptions(self):
        '''Returns the command line options'''
        return self._options

    def getNArgs(self):
        '''Returns the number of command line arguments'''
        return len(self._args)

    def getArgs(self):
        '''Returns the command line argument list'''
        return self._args

    def getArg(self, i):
        '''Returns the i-th command line argument'''
        return self._args[i]

    def getUsername(self):
        """ Get user name. """
        return self._username

    def getPassword(self):
        """ Get password. """
        return self._password

    def runCommand(self):  # pylint: disable=no-self-use
        """
        This method must be implemented by the derived class.
        """
        raise AbstractMethod(
            _('runCommand() has to be overriden in the derived class.'))

    def run(self):
        """
        Invoke runCommand() in derivative class and handle exceptions.
        """
        try:
            self.runCommand()
        except TortugaException as ex:
            print('%s' % (ex.getErrorMessage()))
            raise SystemExit(ex.getErrorCode())
        except SystemExit as ex:
            raise
        except Exception as ex:
            print('%s' % (ex))
            raise SystemExit(-1)

    def getParam(self, xtype, options, oname, config, section, cname,
                 default=None):
        '''
        Get the value of a configurable parameter.
        First look at command line options. Return it if there.
        Then look in the configFile. Return it if there.
        Otherwise return the default.

        xtype coerces the raw value: int (empty -> 0) or bool
        (string "true"/"True" -> True, int -> truthiness).
        '''
        value = self.__getParam2(options, oname, config, section, cname,
                                 default)

        if xtype == int:
            if not value:
                value = 0
            elif type(value) != int:
                value = int(value)
        elif xtype == bool:
            if type(value) == str:
                value = value.lower() == 'true'
            elif type(value) == int:
                value = bool(value)

        return value

    def __getParam2(self, options, oname, config, section, cname, default):
        # pylint: disable=no-self-use

        # Command line option takes precedence
        if options and oname in options.__dict__ and \
                options.__dict__[oname] is not None:
            return options.__dict__[oname]

        # Config file is next
        if config and config.has_section(section) and \
                config.has_option(section, cname):
            return config.get(section, cname)

        # Last resort
        return default

    def _parseDiskSize(self, diskSizeParam):  # pylint: disable=no-self-use
        """
        Parses diskSizeParam, returns an int value representing
        number of megabytes

        Raises:
            ValueError
        """
        if diskSizeParam.endswith('TB'):
            return int(float(diskSizeParam[:-2]) * 1000000)

        if diskSizeParam.endswith('GB'):
            return int(float(diskSizeParam[:-2]) * 1000)
        elif diskSizeParam.endswith('MB'):
            # Must be an integer
            return int(diskSizeParam[:-2])

        return int(diskSizeParam)

    def _getDiskSizeDisplayStr(self, volSize):  # pylint: disable=no-self-use
        """Format a size in MB as a human-readable MB/GB/TB string."""
        if volSize < 1000:
            result = '%s MB' % (volSize)
        elif volSize < 1000000:
            result = '%.3f GB' % (float(volSize) / 1000)
        else:
            result = '%.3f TB' % (float(volSize) / 1000000)

        return result
class SoftwareProfileManager(TortugaObjectManager, Singleton):
    """
    Business-logic layer for creating, querying, updating and deleting
    software profiles and for enabling/disabling kit components on them.
    """

    BASE_KIT_NAME = 'base'

    def __init__(self):
        super(SoftwareProfileManager, self).__init__()
        self._sp_db_api = SoftwareProfileDbApi()
        self._node_db_api = NodeDbApi()
        self._component_db_api = ComponentDbApi()
        self._global_param_db_api = GlobalParameterDbApi()
        self._kit_db_api = KitDbApi()
        self._config_manager = ConfigManager()

    def getSoftwareProfileList(self, tags=None):
        """Return all of the softwareprofiles with referenced components
        in this softwareprofile
        """
        return self._sp_db_api.getSoftwareProfileList(tags=tags)

    def getIdleSoftwareProfileList(self):
        """ Return all of the idle softwareprofiles """
        return self._sp_db_api.getIdleSoftwareProfileList()

    def setIdleState(self, softwareProfileName, state):
        """ Sets the idle state of a softwareprofile """
        return self._sp_db_api.setIdleState(softwareProfileName, state)

    def addAdmin(self, softwareProfileName, adminUsername):
        """
        Add an admin as an authorized user.

            Returns:
                None
            Throws:
                TortugaException
                AdminNotFound
                SoftwareProfileNotFound
        """
        return self._sp_db_api.addAdmin(softwareProfileName, adminUsername)

    def deleteAdmin(self, softwareProfileName, adminUsername):
        """
        Remove an admin as an authorized user.

            Returns:
                None
            Throws:
                TortugaException
                AdminNotFound
                SoftwareProfileNotFound
        """
        return self._sp_db_api.deleteAdmin(softwareProfileName,
                                           adminUsername)

    def updateSoftwareProfile(self, softwareProfileObject):
        """
        Persist changes to an existing software profile.

        Immutable fields (OS info/id, idle flag, type) are re-copied from
        the stored profile so callers cannot change them.
        """
        try:
            self.getLogger().debug('Updating software profile: %s' % (
                softwareProfileObject.getName()))

            # First get the object from the db we are updating...
            existingProfile = self.getSoftwareProfileById(
                softwareProfileObject.getId())

            # Set parameters that we will not allow updating
            softwareProfileObject.setOsInfo(existingProfile.getOsInfo())
            softwareProfileObject.setOsId(existingProfile.getOsId())
            softwareProfileObject.setIsIdle(existingProfile.getIsIdle())
            softwareProfileObject.setType(existingProfile.getType())

            self._sp_db_api.updateSoftwareProfile(softwareProfileObject)
        except TortugaException:
            # Domain errors propagate unchanged
            raise
        except Exception as ex:
            self.getLogger().exception('%s' % ex)
            raise TortugaException(exception=ex)

    def getSoftwareProfile(self, name, optionDict=None):
        """Look up a software profile by name."""
        return self._sp_db_api.getSoftwareProfile(name, optionDict or {})

    def getSoftwareProfileById(self, id_, optionDict=None):
        """Look up a software profile by database id."""
        return self._sp_db_api.getSoftwareProfileById(
            id_, optionDict=optionDict or {})

    def _getCoreComponentForOsInfo(self, osInfo):
        """
        Return the best-match 'core' component of the 'base' kit for the
        given OS.

        Raises:
            KitNotFound
            ComponentNotFound
        """
        # Local import mirrors the original code; presumably avoids a
        # circular import at module load time -- TODO confirm
        import tortuga.kit.kitApi
        _kitApi = tortuga.kit.kitApi.KitApi()

        baseKit = next((kit for kit in _kitApi.getKitList()
                        if kit.getName() == self.BASE_KIT_NAME), None)
        if baseKit is None:
            raise KitNotFound('Kit [%s] not found.' % (self.BASE_KIT_NAME))

        baseComp = next((c for c in baseKit.getComponentList()
                         if c.getName() == 'core'), None)
        if baseComp is None:
            raise ComponentNotFound('Component [%s] not found in kit [%s]'
                                    % ('core', baseKit.getName()))

        comp = osUtility.getOsObjectFactory().getComponentManager().\
            getBestMatchComponent(
                baseComp.getName(), baseComp.getVersion(), osInfo,
                baseKit.getId())

        comp.setKit(baseKit)

        return comp

    def _getOsInfo(self, bOsMediaRequired):
        """
        Determine the OS for a new software profile.

        When OS media is not required, fall back to the installer node's
        own OS; otherwise require exactly one installed OS kit.

        Raises:
            KitNotFound
            ComponentNotFound
        """
        if not bOsMediaRequired:
            # As a placeholder, use the same OS as the installer

            # Find installer node entry
            node = self._node_db_api.getNode(ConfigManager().getInstaller(),
                                             {'softwareprofile': True})

            return node.getSoftwareProfile().getOsInfo()

        # Use available operating system kit; raise exception if
        # multiple available
        os_kits = _get_os_kits()
        if not os_kits:
            raise KitNotFound('No operating system kit installed')

        if len(os_kits) > 1:
            raise KitNotFound(
                'Multiple OS kits defined; use --os option to specify'
                ' operating system')

        kit = kitApiFactory.getKitApi().getKit(os_kits[0].getName(),
                                               os_kits[0].getVersion(), '0')

        components = kit.getComponentList()

        if not components:
            raise ComponentNotFound('Malformed operating system kit [%s]' % (
                os_kits))

        osinfo_list = components[0].getOsInfoList()
        if len(osinfo_list) > 1:
            raise ComponentNotFound(
                'Multiple operating system components for kit [%s];'
                ' use --os argument to specify operating system' % (
                    os_kits[0]))

        return osinfo_list[0]

    def createSoftwareProfile(self, swProfileSpec, settingsDict=None):
        """
        Create a new software profile.

        settingsDict keys (both optional):
            bOsMediaRequired (bool, default True)
            unmanagedProfile (bool, default False)

        Exceptions:
            ConfigurationError
            NetworkNotFound
            ComponentNotFound
            KitNotFound
            OSError
        """
        # Parse 'settingsDict'.  BUGFIX: previously these two flags were
        # only assigned inside "if settingsDict:", so calling with the
        # default settingsDict=None raised UnboundLocalError further down.
        bOsMediaRequired = True
        unmanagedProfile = False
        if settingsDict:
            # ... bOsMediaRequired; default is True
            bOsMediaRequired = settingsDict.get('bOsMediaRequired', True)
            # ... unmanagedProfile; default is False
            unmanagedProfile = settingsDict.get('unmanagedProfile', False)

        # Validate software profile name
        validation.validateProfileName(swProfileSpec.getName())

        # Insert default description for software profile
        if not swProfileSpec.getDescription() or \
                swProfileSpec.getDescription() == '**DEFAULT**':
            swProfileSpec.setDescription('%s Nodes' % (
                swProfileSpec.getName()))

        self.getLogger().debug('Creating software profile [%s]' % (
            swProfileSpec))

        osInfo = swProfileSpec.getOsInfo() \
            if swProfileSpec.getOsInfo() else \
            self._getOsInfo(bOsMediaRequired)

        # If we're creating an unmanaged software profile (no
        # DHCP/PXE/kickstart/OS) just create it now and we're done
        if unmanagedProfile:
            self._sp_db_api.addSoftwareProfile(swProfileSpec)
        else:
            if bOsMediaRequired and swProfileSpec.getOsInfo():
                try:
                    kitApiFactory.getKitApi().getKit(
                        swProfileSpec.getOsInfo().getName(),
                        swProfileSpec.getOsInfo().getVersion(), '0')
                except KitNotFound:
                    self._logger.error('OS kit for [%s] not found' % (
                        swProfileSpec.getOsInfo()))

                    raise
            else:
                swProfileSpec.setOsInfo(osInfo)

            # Get component manager for appropriate OS family
            osConfig = osHelper.getOsInfo(osInfo.getName(),
                                          osInfo.getVersion(),
                                          osInfo.getArch())

            osObjFactory = osUtility.getOsObjectFactory(
                osConfig.getOsFamilyInfo().getName())

            compManager = osObjFactory.getComponentManager()

            # Need to be fancy with components
            spComponents = swProfileSpec.getComponents()
            swProfileSpec.setComponents(TortugaObjectList())

            bFoundOsComponent = False
            bFoundCoreComponent = False

            components = []

            # Iterate over components, adding them to the software profile
            for c in spComponents:
                cobj = compManager.getBestMatchComponent(
                    c.getName(), c.getVersion(), osInfo, c.getKit().getId())

                k = cobj.getKit()

                if k.getIsOs():
                    # This component is a member of the OS kit, set the flag
                    bFoundOsComponent = True
                elif c.getName() == 'core':
                    # Found the 'core' component, set the flag
                    bFoundCoreComponent = True

                components.append(cobj)

            # If the operating system is undefined for this software
            # profile, use the same OS as the installer.
            if bOsMediaRequired and not bFoundOsComponent:
                # Find OS component
                osCompName = '%s-%s-%s' % (osInfo.getName(),
                                           osInfo.getVersion(),
                                           osInfo.getArch())

                self.getLogger().debug('Automatically adding OS component'
                                       ' [%s] (not specified in template)'
                                       % (osCompName))

                try:
                    osComponent = self._component_db_api.getComponent(
                        osCompName, osInfo.getVersion(), osInfo,
                        {'kit': True})

                    components.append(osComponent)
                except ComponentNotFound:
                    # Cannot find OS component, don't freak out
                    pass

            # Ensure 'core' component is enabled
            if not bFoundCoreComponent:
                # Attempt to automatically add the core component, only
                # if one exists for this OS
                try:
                    comp = self._getCoreComponentForOsInfo(osInfo)

                    self.getLogger().debug(
                        'Automatically adding [core] component'
                        ' (not specified in template)')

                    components.append(comp)
                except ComponentNotFound:
                    pass

            # Initialize values for kernel, kernelParams, and initrd
            if not swProfileSpec.getKernel():
                swProfileSpec.setKernel(
                    osObjFactory.getOsSysManager().getKernel(osInfo))

            if not swProfileSpec.getInitrd():
                swProfileSpec.setInitrd(
                    osObjFactory.getOsSysManager().getInitrd(osInfo))

            # Add the software profile
            self._sp_db_api.addSoftwareProfile(swProfileSpec)

            # Enable components in one fell swoop
            for comp in components:
                self.getLogger().debug('Enabling component [%s]' % (
                    comp.getName()))

                if comp.getKit().getIsOs():
                    # Don't use enableComponent() on OS kit
                    self._component_db_api.addComponentToSoftwareProfile(
                        comp.getId(), swProfileSpec.getId())

                    continue

                self.enableComponent(swProfileSpec.getName(),
                                     comp.getKit().getName(),
                                     comp.getKit().getVersion(),
                                     comp.getKit().getIteration(),
                                     comp.getName(), comp.getVersion())

        self.getLogger().debug(
            'Software profile [%s] created successfully' % (
                swProfileSpec.getName()))

    def _getComponent(self, kit, compName, compVersion):
        # pylint: disable=no-self-use
        """
        Return the component in 'kit' matching name and version.

        Raises:
            ComponentNotFound
        """
        # Iterate over component list, looking for a match
        for comp in kit.getComponentList():
            if comp.getName() == compName and \
                    comp.getVersion() == compVersion:
                return comp

        raise ComponentNotFound("Component [%s-%s] not found in kit [%s]"
                                % (compName, compVersion, kit))

    def _get_kit_by_component(self, comp_name, comp_version=None):
        """
        Gets a kit by component name/version.

        :param comp_name:    the name of the component
        :param comp_version: the version of the component

        :raises KitNotFound:
        :raises ComponentNotFound:
        """
        kit_list = self._kit_db_api.getKitList()
        kits = [
            kit
            for kit in kit_list
            for component in kit.getComponentList()
            if component.getName() == comp_name and
            (comp_version is None or
             component.getVersion() == comp_version)
        ]
        if not kits:
            raise KitNotFound(
                'Kit containing component [%s] not found' % (comp_name))

        if len(kits) > 1:
            raise ComponentNotFound(
                'Kit name must be specified, multiple kits contain '
                'component: {}'.format(comp_name))

        return kits[0]

    def enableComponent(self, software_profile_name, kit_name, kit_version,
                        kit_iteration, comp_name, comp_version=None):
        """
        Enable a component on a software profile.

        :param software_profile_name: the name of the software profile
        :param kit_name:              the name of the kit
        :param kit_version:           the version of the kit
        :param kit_iteration:         the iteration of the kit
        :param comp_name:             the name of the component
        :param comp_version:          the version of the component

        :raises KitNotFound:
        :raises SoftwareProfileNotFound:
        :raises ComponentNotFound:
        """
        kit, comp_version = self._get_kit_and_component_version(
            kit_name, kit_version, kit_iteration, comp_name, comp_version)

        software_profile = self.getSoftwareProfile(software_profile_name,
                                                   {'os': True})

        if kit.getIsOs():
            best_match_component = self._enable_os_kit_component(
                kit, comp_name, comp_version, software_profile)
        else:
            best_match_component = self._enable_kit_component(
                kit, comp_name, comp_version, software_profile)

        if not best_match_component:
            self.getLogger().info(
                'Component not enabled: {}'.format(comp_name))
        else:
            self.getLogger().info(
                'Enabled component on software profile: {} -> {}'.format(
                    best_match_component, software_profile))

    def _get_kit_and_component_version(self, kit_name, kit_version,
                                       kit_iteration, comp_name,
                                       comp_version=None):
        """
        Gets a Kit instance and component version.

        :param kit_name:      the name of the kit
        :param kit_version:   the version of the kit
        :param kit_iteration: the iteration of the kit
        :param comp_name:     the component name
        :param comp_version:  the component version (optional)

        :return: a tuple, consisting of (Kit, component_version)
        """
        kit = None

        if kit_name is None:
            # No kit specified: infer it from the component
            kit = self._get_kit_by_component(comp_name,
                                             comp_version=comp_version)
            #
            # Get component version if required
            #
            if comp_version is None:
                for component in kit.getComponentList():
                    if component.getName() == comp_name:
                        comp_version = component.getVersion()
                        break
        elif kit_version is None or kit_iteration is None:
            # Partially-specified kit: scan for matches, must be unique
            kits_found = 0
            for k in self._kit_db_api.getKitList():
                if k.getName() == kit_name and \
                        (kit_version is None or
                         k.getVersion() == kit_version) and \
                        (kit_iteration is None or
                         k.getIteration() == kit_iteration):
                    kit = k
                    kits_found += 1

            if kits_found > 1:
                if kit_version is not None:
                    raise KitNotFound('Multiple kits found: {}-{}'.format(
                        kit_name, kit_version))
                else:
                    raise KitNotFound(
                        'Multiple kits found {}'.format(kit_name))
        else:
            kit = self._kit_db_api.getKit(kit_name, kit_version,
                                          kit_iteration)

        return kit, comp_version

    def _enable_kit_component(self, kit, comp_name, comp_version,
                              software_profile):
        """
        Enables a regular kit component on a specific software profile.

        :param kit:              the Kit instance, whose component is being
                                 enabled
        :param comp_name:        the name of the component to enable
        :param comp_version:     the version of the component to enable
        :param software_profile: the software profile on which the component
                                 will be enabled

        :return: the Component instance that was enabled
        """
        kit_spec = (kit.getName(), kit.getVersion(), kit.getIteration())

        load_kits()
        installer = get_kit_installer(kit_spec)()
        comp_installer = installer.get_component_installer(comp_name)
        if not comp_installer.is_enableable(software_profile):
            self.getLogger().warning(
                'Component cannot be enabled: {}'.format(
                    comp_installer.spec))
            return None

        comp_installer.run_action('pre_enable',
                                  software_profile.getName())

        best_match_component = self._add_component_to_software_profile(
            kit, comp_name, comp_version, software_profile)

        comp_installer.run_action('enable', software_profile.getName())
        comp_installer.run_action('post_enable',
                                  software_profile.getName())

        return best_match_component

    def _enable_os_kit_component(self, kit, comp_name, comp_version,
                                 software_profile):
        """
        Enables an OS kit component on a specific software profile.

        :param kit:              the OS Kit instance, whose component is
                                 being enabled
        :param comp_name:        the name of the component to enable
        :param comp_version:     the version of the component to enable
        :param software_profile: the software profile on which the component
                                 will be enabled

        :return: the Component instance that was enabled
        """
        # OS kit components have no installer actions; data-only operation
        return self._add_component_to_software_profile(
            kit, comp_name, comp_version, software_profile)

    def _add_component_to_software_profile(self, kit, comp_name,
                                           comp_version, software_profile):
        """
        Adds a component to a software profile. This is a data-only
        operation, as no pre/post enable actions are called.

        :param kit:              the Kit instance, whose component is being
                                 added
        :param comp_name:        the name of the component to add
        :param comp_version:     the version of the component to add
        :param software_profile: the software profile to which the component
                                 will be added

        :return: the Component instance that was added
        """
        os_obj_factory = osUtility.getOsObjectFactory(
            software_profile.getOsInfo().getOsFamilyInfo().getName())
        comp_manager = os_obj_factory.getComponentManager()
        best_match_component = comp_manager.getBestMatchComponent(
            comp_name, comp_version, software_profile.getOsInfo(),
            kit.getId())

        self._component_db_api.addComponentToSoftwareProfile(
            best_match_component.getId(), software_profile.getId())

        return best_match_component

    def disableComponent(self, software_profile_name, kit_name, kit_version,
                         kit_iteration, comp_name, comp_version=None):
        """
        Disables a component on a software profile.

        :param software_profile_name: the name of the software profile
        :param kit_name:              the name of the kit
        :param kit_version:           the version of the kit
        :param kit_iteration:         the iteration of the kit
        :param comp_name:             the name of the component
        :param comp_version:          the version of the component

        :raises KitNotFound:
        :raises SoftwareProfileNotFound:
        :raises ComponentNotFound:
        """
        # NOTE(review): unlike enableComponent(), the caller-supplied
        # comp_version is not forwarded here; preserved as-is -- confirm
        # whether this is intentional.
        kit, comp_version = self._get_kit_and_component_version(
            kit_name, kit_version, kit_iteration, comp_name)

        software_profile = self.getSoftwareProfile(software_profile_name,
                                                   {'os': True})

        if kit.getIsOs():
            best_match_component = self._disable_os_kit_component(
                kit, comp_name, comp_version, software_profile)
        else:
            best_match_component = self._disable_kit_component(
                kit, comp_name, comp_version, software_profile)

        self.getLogger().info(
            'Disabled component on software profile: {} -> {}'.format(
                best_match_component, software_profile))

    def _disable_kit_component(self, kit, comp_name, comp_version,
                               software_profile):
        """
        Disables a regular kit component on a specific software profile.

        :param kit:              the Kit instance, whose component is being
                                 disabled
        :param comp_name:        the name of the component to disable
        :param comp_version:     the version of the component to disable
        :param software_profile: the software profile on which the component
                                 will be disable

        :return: the Component instance that was disabled
        """
        kit_spec = (kit.getName(), kit.getVersion(), kit.getIteration())

        load_kits()
        installer = get_kit_installer(kit_spec)()
        comp_installer = installer.get_component_installer(comp_name)

        comp_installer.run_action('pre_disable',
                                  software_profile.getName())
        comp_installer.run_action('disable', software_profile.getName())

        best_match_component = \
            self._remove_component_from_software_profile(
                kit, comp_name, comp_version, software_profile)

        comp_installer.run_action('post_disable',
                                  software_profile.getName())

        return best_match_component

    def _disable_os_kit_component(self, kit, comp_name, comp_version,
                                  software_profile):
        """
        Disables an OS kit component on a specific software profile.

        :param kit:              the OS Kit instance, whose component is
                                 being disabled
        :param comp_name:        the name of the component to disable
        :param comp_version:     the version of the component to disable
        :param software_profile: the software profile on which the component
                                 will be disabled

        :return: the Component instance that was disabled
        """
        # OS kit components have no installer actions; data-only operation
        return self._remove_component_from_software_profile(
            kit, comp_name, comp_version, software_profile)

    def _remove_component_from_software_profile(self, kit, comp_name,
                                                comp_version,
                                                software_profile):
        """
        Removes a component from a software profile. This is a data-only
        operation, as no pre/post disable actions are called.

        :param kit:              the OS Kit instance, whose component is
                                 being removed
        :param comp_name:        the name of the component to remove
        :param comp_version:     the version of the component to remove
        :param software_profile: the software profile from which the
                                 component will be removed

        :return: the Component instance that was removed
        """
        os_obj_factory = osUtility.getOsObjectFactory(
            software_profile.getOsInfo().getOsFamilyInfo().getName())
        comp_manager = os_obj_factory.getComponentManager()
        best_match_component = comp_manager.getBestMatchComponent(
            comp_name, comp_version, software_profile.getOsInfo(),
            kit.getId())

        self._component_db_api.deleteComponentFromSoftwareProfile(
            best_match_component.getId(), software_profile.getId())

        return best_match_component

    def deleteSoftwareProfile(self, name):
        """
        Delete software profile by name

        Raises:
            SoftwareProfileNotFound
        """
        self._sp_db_api.deleteSoftwareProfile(name)

        # Remove all flags for software profile
        swProfileFlagPath = os.path.join(
            self._config_manager.getRoot(),
            'var/run/actions/%s' % (name))
        if os.path.exists(swProfileFlagPath):
            shutil.rmtree(swProfileFlagPath)

        self.getLogger().info('Deleted software profile [%s]' % (name))

    def getNodeList(self, softwareProfileName):
        """Return nodes in the named software profile."""
        return self._sp_db_api.getNodeList(softwareProfileName)

    def getEnabledComponentList(self, name):
        """ Get the list of enabled components """
        return self._sp_db_api.getEnabledComponentList(name)

    def getPackageList(self, softwareProfileName):
        """ Get list of packages. """
        return self._sp_db_api.getPackageList(softwareProfileName)

    def getPartitionList(self, softwareProfileName):
        """ Get list of partitions. """
        return self._sp_db_api.getPartitionList(softwareProfileName)

    def getProvisioningInfo(self, nodeName):
        """Return provisioning information for the named node."""
        return self._sp_db_api.getProvisioningInfo(nodeName)

    def addUsableHardwareProfileToSoftwareProfile(self, hardwareProfileName,
                                                  softwareProfileName):
        """Map a hardware profile onto a software profile."""
        self._logger.info(
            'Mapping hardware profile [%s] to software profile [%s]' % (
                hardwareProfileName, softwareProfileName))

        return self._sp_db_api.addUsableHardwareProfileToSoftwareProfile(
            hardwareProfileName, softwareProfileName)

    def deleteUsableHardwareProfileFromSoftwareProfile(
            self, hardwareProfileName, softwareProfileName):
        """Remove a hardware profile mapping from a software profile."""
        return self._sp_db_api.\
            deleteUsableHardwareProfileFromSoftwareProfile(
                hardwareProfileName, softwareProfileName)

    def copySoftwareProfile(self, srcSoftwareProfileName,
                            dstSoftwareProfileName):
        """Copy an existing software profile under a new (valid) name."""
        # Validate software profile name
        validation.validateProfileName(dstSoftwareProfileName)

        self._logger.info('Copying software profile [%s] to [%s]' % (
            srcSoftwareProfileName, dstSoftwareProfileName))

        softwareProfile = self._sp_db_api.copySoftwareProfile(
            srcSoftwareProfileName, dstSoftwareProfileName)

        return softwareProfile

    def getUsableNodes(self, softwareProfileName):
        """Return nodes usable by the named software profile."""
        return self._sp_db_api.getUsableNodes(softwareProfileName)
class OsBootHostManagerCommon(OsObjectManager):
    """Methods for manipulating PXE files"""

    def __init__(self):
        OsObjectManager.__init__(self)

        # Cache the web-server account entry for later use; when no
        # 'apache' account exists, fall back to the invoking user.
        # NOTE(review): if $USER is unset, getpwnam(None) raises -- confirm
        try:
            self.passdata = pwd.getpwnam('apache')
        except KeyError:
            self.passdata = pwd.getpwnam(os.getenv('USER'))

        self._cm = ConfigManager()

    def deletePuppetNodeCert(self, nodeName):
        """
        Remove the Puppet certificate, YAML and report files for a node
        that is being reinstalled.  Missing files are silently ignored.
        """
        self.getLogger().debug('deletePuppetNodeCert(node=[%s])' % (
            nodeName))

        ssl_dir = '/etc/puppetlabs/puppet/ssl'
        reports_dir = '/var/lib/puppet/reports'
        yaml_dir = '/var/lib/puppet/yaml'

        doomed_files = [
            os.path.join(ssl_dir, 'public_keys/%s.pem' % (nodeName)),
            os.path.join(ssl_dir, 'ca/signed/%s.pem' % (nodeName)),
            os.path.join(ssl_dir, 'private_keys/%s.pem' % (nodeName)),
            os.path.join(ssl_dir, 'certs/%s.pem' % (nodeName)),
            os.path.join(yaml_dir, 'node/%s.yaml' % (nodeName)),
            os.path.join(yaml_dir, 'facts/%s.yaml' % (nodeName)),
        ]

        for path in doomed_files:
            try:
                os.unlink(path)
            except OSError as exc:
                # errno 2 (ENOENT) means "already gone" -- not an error
                if exc.errno != 2:
                    self.getLogger().error(
                        'Error attempting to remove %s (reason: %s)' % (
                            path, exc))

        report_path = os.path.join(reports_dir, nodeName)

        try:
            shutil.rmtree(report_path)
        except OSError as exc:
            if exc.errno != 2:
                self.getLogger().error(
                    'Error attempting to remove %s (reason: %s)' % (
                        report_path, exc))

    def nodeCleanup(self, nodeName):
        """
        Remove files related to the node
        """
        # Remove 'private' directory
        private_dir = os.path.join(self._cm.getRoot(), 'private', nodeName)

        if os.path.exists(private_dir):
            shutil.rmtree(private_dir)

    def addDhcpLease(self, node, nic):
        # Add DHCP lease to DHCP server
        pass

    def removeDhcpLease(self, nodeName):
        # Remove the DHCP lease from the DHCP server. This will be
        # a no-op on any platform that doesn't support the operation
        # (ie. any platform not running ISC DHCPD)
        pass

    def setNodeForNetworkBoot(self, dbNode):
        # Update node status to "Expired" and boot from network
        dbNode.state = 'Expired'
        dbNode.bootFrom = 0

        self.deletePuppetNodeCert(dbNode.name)

        # Write the updated file
        self.writePXEFile(dbNode)
def test_instantiation(): cm = ConfigManager() assert cm assert cm.getRoot()
class SyncManager(TortugaObjectManager):
    """Class for cluster sync management"""

    __instanceLock = threading.RLock()

    # update delay increase (seconds)
    CLUSTER_UPDATE_DELAY_INCREASE = 30

    # after this limit is reached, warning will be logged
    CLUSTER_UPDATE_WARNING_LIMIT = 10

    def __init__(self):
        super(SyncManager, self).__init__()

        self._isUpdateScheduled = False
        self._isUpdateRunning = False
        self._cm = ConfigManager()
        self._logger = logging.getLogger(SYNC_NAMESPACE)

    def __runClusterUpdate(self, opts=None):
        """
        Run cluster update.

        :param opts: optional dict controlling which FACTER_* variables are
                     exported to the update script; recognized keys are
                     'node', 'software_profile' and 'slurm_update'.
        """
        # BUGFIX: default was the mutable literal `opts={}`, shared across
        # calls; use a None sentinel instead.
        if opts is None:
            opts = {}

        self._logger.debug('Update timer running, opts={}'.format(opts))

        updateCmd = os.path.join(self._cm.getBinDir(),
                                 'run_cluster_update.sh')

        delay = 0
        updateCnt = 0

        # Keep running as long as another update was scheduled while we
        # were working; back off a bit more on each pass.
        while self.__resetIsUpdateScheduled():
            self._isUpdateRunning = True

            self._logger.debug(
                'New cluster update delay: %s seconds' % (delay))

            time.sleep(delay)
            delay += SyncManager.CLUSTER_UPDATE_DELAY_INCREASE

            # Log warning if timer has been running for too many times.
            updateCnt += 1
            self._logger.debug('Cluster update timer count: %s' % (
                updateCnt))

            if updateCnt > SyncManager.CLUSTER_UPDATE_WARNING_LIMIT:
                self._logger.warning(
                    'Cluster updated more than %s times using the same'
                    ' timer (possible configuration problem)' % (
                        SyncManager.CLUSTER_UPDATE_WARNING_LIMIT))

            self._logger.debug(
                'Starting cluster update using: %s' % (updateCmd))

            # Since we might sleep for a while, we need to
            # reset update flag just before we run update to avoid
            # unnecessary syncs.
            self.__resetIsUpdateScheduled()

            env = {
                **os.environ,
                'PATH': self._cm.getBinDir() + ':' + os.environ['PATH'],
                'TORTUGA_ROOT': self._cm.getRoot()
            }

            # Export the relevant opts payload as a Facter fact so the
            # Puppet run can see what changed.
            if 'node' in opts:
                node_update = opts['node']

                env['FACTER_node_tags_update'] = json.dumps(node_update)
                self._logger.debug('FACTER_node_tags_update={}'.format(
                    env['FACTER_node_tags_update']))

                p = TortugaSubprocess(updateCmd, env=env)
            elif 'software_profile' in opts:
                swp_update = opts['software_profile']

                env['FACTER_softwareprofile_tags_update'] = json.dumps(
                    swp_update)
                self._logger.debug(
                    'FACTER_softwareprofile_tags_update={}'.format(
                        env['FACTER_softwareprofile_tags_update']))

                p = TortugaSubprocess(updateCmd, env=env)
            elif 'slurm_update' in opts:
                env['FACTER_slurm_cluster'] = opts['slurm_update'][
                    'slurm_cluster']
                self._logger.debug('FACTER_slurm_cluster={}'.format(
                    env['FACTER_slurm_cluster']))

                p = TortugaSubprocess(updateCmd, env=env)
            else:
                p = TortugaSubprocess(updateCmd)

            try:
                p.run()

                self._logger.debug('Cluster update successful')
                self._logger.debug('stdout: {}'.format(
                    p.getStdOut().decode().rstrip()))
                self._logger.debug('stderr: {}'.format(
                    p.getStdErr().decode().rstrip()))
            except CommandFailed:
                if p.getExitStatus() == tortugaStatus.\
                        TORTUGA_ANOTHER_INSTANCE_OWNS_LOCK_ERROR:
                    # Another updater holds the lock; reschedule rather
                    # than fail.
                    self._logger.debug(
                        'Another cluster update is already running, will'
                        ' try to reschedule it')

                    self._isUpdateRunning = False
                    self.scheduleClusterUpdate(
                        updateReason='another update already running',
                        delay=60, opts=opts)

                    break
                else:
                    self._logger.error(
                        'Update command "%s" failed (exit status: %s)' % (
                            updateCmd, p.getExitStatus()))
                    self._logger.debug('stdout: {}'.format(
                        p.getStdOut().decode().rstrip()))
                    self._logger.debug('stderr: {}'.format(
                        p.getStdErr().decode().rstrip()))

            self._logger.debug('Done with cluster update')

            self._isUpdateRunning = False

        self._logger.debug('Update timer exiting')

    def __resetIsUpdateScheduled(self):
        """
        Reset cluster update flag, return old flag value.
        """
        SyncManager.__instanceLock.acquire()
        try:
            flag = self._isUpdateScheduled
            self._isUpdateScheduled = False
            return flag
        finally:
            SyncManager.__instanceLock.release()

    def scheduleClusterUpdate(self, updateReason=None, delay=5, opts=None):
        """
        Schedule cluster update.

        :param updateReason: human-readable reason, for logging only
        :param delay:        seconds before the update timer fires
        :param opts:         optional dict forwarded to the update run
        """
        # BUGFIX: default was the mutable literal `opts={}`; use a None
        # sentinel and normalize so log output is unchanged.
        if opts is None:
            opts = {}

        SyncManager.__instanceLock.acquire()
        try:
            if self._isUpdateScheduled:
                # Already scheduled.
                return

            # Start update timer if needed.
            self._isUpdateScheduled = True

            if not self._isUpdateRunning:
                self._logger.debug(
                    'Scheduling cluster update in %s seconds,'
                    ' reason: %s, opts: %s' % (delay, updateReason, opts))

                t = threading.Timer(delay, self.__runClusterUpdate,
                                    kwargs=dict(opts=opts))
                t.start()
            else:
                self._logger.debug(
                    'Will not schedule new update timer while the old'
                    ' timer is running')
        finally:
            SyncManager.__instanceLock.release()

    def getUpdateStatus(self):  # pylint: disable=no-self-use
        """
        Check cluster update flag.
        """
        return RunManager().checkLock('cfmsync')