def _disable_kit_component(self, kit, comp_name, comp_version, software_profile):
    """
    Disables a regular kit component on a specific software profile.

    Runs the component installer's 'pre_disable', 'disable' and
    'post_disable' actions around the database removal.

    :param kit:              the Kit instance, whose component is being
                             disabled
    :param comp_name:        the name of the component to disable
    :param comp_version:     the version of the component to disable
    :param software_profile: the software profile on which the component
                             will be disabled

    :return: the Component instance that was disabled
    """
    # Kits must be loaded before a kit installer can be resolved
    load_kits()

    installer = get_kit_installer(
        (kit.getName(), kit.getVersion(), kit.getIteration()))()
    comp_installer = installer.get_component_installer(comp_name)

    profile_name = software_profile.getName()

    comp_installer.run_action('pre_disable', profile_name)
    comp_installer.run_action('disable', profile_name)

    disabled_component = self._remove_component_from_software_profile(
        kit, comp_name, comp_version, software_profile)

    comp_installer.run_action('post_disable', profile_name)

    return disabled_component
def _enable_kit_component(self, kit, comp_name, comp_version, software_profile):
    """
    Enables a regular kit component on a specific software profile.

    Runs the component installer's 'pre_enable', 'enable' and
    'post_enable' actions around the database insertion. Does nothing
    (and returns None) when the installer reports the component is not
    enableable on this profile.

    :param kit:              the Kit instance, whose component is being
                             enabled
    :param comp_name:        the name of the component to enable
    :param comp_version:     the version of the component to enable
    :param software_profile: the software profile on which the component
                             will be enabled

    :return: the Component instance that was enabled, or None if the
             component could not be enabled
    """
    # Kits must be loaded before a kit installer can be resolved
    load_kits()

    installer = get_kit_installer(
        (kit.getName(), kit.getVersion(), kit.getIteration()))()
    comp_installer = installer.get_component_installer(comp_name)

    # Guard clause: bail out early if the component rejects this profile
    if not comp_installer.is_enableable(software_profile):
        self.getLogger().warning('Component cannot be enabled: {}'.format(
            comp_installer.spec))
        return None

    profile_name = software_profile.getName()

    comp_installer.run_action('pre_enable', profile_name)

    enabled_component = self._add_component_to_software_profile(
        kit, comp_name, comp_version, software_profile)

    comp_installer.run_action('enable', profile_name)
    comp_installer.run_action('post_enable', profile_name)

    return enabled_component
def get_software_profile_metadata(self, session: Session, name: str) -> Dict[str, str]:
    """
    Call action_get_metadata() method for all kits
    """
    self._logger.debug(
        'Retrieving metadata for software profile [%s]', name)

    metadata: Dict[str, str] = {}

    for kit in self._kit_db_api.getKitList(session):
        # OS kits are skipped; only regular kits contribute metadata
        if kit.getIsOs():
            continue

        kit_installer = get_kit_installer(
            (kit.getName(), kit.getVersion(), kit.getIteration()))()
        kit_installer.session = session

        # we are only interested in software profile metadata
        item = kit_installer.action_get_metadata(
            software_profile_name=name)
        if item:
            metadata.update(item)

    return metadata
def main():
    """
    Run the 'post_install' component action for a single component on a
    software profile, guarded by a per-component flag file so the action
    runs only once (unless --force is given).

    Positional arguments:
        name       software profile name
        kit        kit descriptor, NAME-VERSION-ITERATION
        component  component descriptor, NAME-VERSION
    """
    cm = ConfigManager()

    p = argparse.ArgumentParser()
    p.add_argument('-f', '--force', dest='force', action='store_true',
                   default=False)
    p.add_argument('name', help='Software profile name')
    p.add_argument('kit', help='Kit descriptor (NAME-VERSION-ITERATION)')
    p.add_argument('component', help='Component descriptor (NAME-VERSION)')

    args = p.parse_args()

    # Iteration is the last hyphen-separated field of the kit descriptor.
    # NOTE(review): the subsequent split('-', 1) assumes the kit NAME
    # itself contains no hyphen — confirm against kit naming rules.
    kitNameAndVersion, kitIteration = args.kit.rsplit('-', 1)
    kitName, kitVersion = kitNameAndVersion.split('-', 1)

    # BUGFIX: split from the right (mirroring the kit parsing above).
    # The previous split('-', 2) raised ValueError for component names
    # containing a hyphen (e.g. "my-comp-1.0" -> 3 fields, 2 targets).
    compName, _ = args.component.rsplit('-', 1)

    flagFile = os.path.join(
        cm.getRoot(),
        'var/run/actions/%s/component_%s_%s_post_install' % (
            args.name, args.kit, args.component))

    if os.path.exists(flagFile):
        if not args.force:
            sys.stderr.write(
                'post-install component action for [%s] already run\n' % (
                    compName))
            sys.exit(0)

        # Remove the existing flag file, we're forcing a run
        os.unlink(flagFile)

    load_kits()

    kit_spec = (kitName, kitVersion, kitIteration)

    try:
        with DbManager().session() as session:
            kit_installer = get_kit_installer(kit_spec)()
            kit_installer.session = session

            c = kit_installer.get_component_installer(compName)
            if c is None:
                raise ComponentNotFound(
                    'Component [%s] not found in kit [%s]' % (
                        compName, kitName))

            c.run_action('post_install')

            logger.debug(
                'post_install component action run for [%s] from kit [%s]' % (
                    args.component, args.kit))

            # Ensure destination directory exists
            if not os.path.exists(os.path.dirname(flagFile)):
                os.makedirs(os.path.dirname(flagFile))

            # touch flagFile
            open(flagFile, 'w').close()
    except Exception as exc:  # noqa pylint: disable=broad-except
        print('Error: {}'.format(exc), file=sys.stderr)
        # NOTE(review): exits with status 0 even on failure — preserved
        # as-is since provisioning callers may rely on best-effort
        # semantics; confirm before changing to a non-zero exit code.
        sys.exit(0)
def get_puppet_node_yaml(session, nodeName):
    """
    Emit a Puppet ENC (external node classifier) YAML document for the
    given node on stdout.

    The document contains the node's Puppet classes (one per enabled
    non-OS kit component, with per-component arguments), plus a
    'parameters' section carrying the software/hardware profile names,
    the installer hostname the node should use, and yum repository
    definitions for enabled kits and the optional third-party repo.

    :param session:  database session used by all DB handler lookups
    :param nodeName: FQDN (or short name) of the node being classified

    Exits the process with status 1 if the node is not found.
    """
    _cm = ConfigManager()

    publicInstallerFQDN = _cm.getInstaller().lower()
    primaryInstallerHostName = publicInstallerFQDN.split('.', 1)[0]

    # DNS zone is optional; None means "no zone suffix appended"
    try:
        dnsZone = GlobalParametersDbHandler().getParameter(
            session, 'DNSZone').value.lower()
    except ParameterNotFound:
        dnsZone = None

    # Override the default depot dir if the 'depot' global parameter is set.
    # NOTE(review): .lower() on a filesystem path looks suspicious — verify
    # depot paths are guaranteed lowercase.
    try:
        depot_path = GlobalParametersDbHandler().getParameter(
            session, 'depot').value.lower()

        _cm.setDepotDir(depot_path)
    except ParameterNotFound:
        pass

    # True when the node being classified IS the (primary) installer
    bInstaller = primaryInstallerHostName == nodeName.split('.', 1)[0]

    try:
        dbNode = NodesDbHandler().getNode(session, nodeName)
    except NodeNotFound:
        # unknown node: signal failure to the caller (Puppet ENC protocol)
        sys.exit(1)

    # Optional add-host request payload, passed through to component
    # 'get_puppet_args' actions below; best-effort lookup.
    data = None
    try:
        from tortuga.db.dataRequestsDbHandler import DataRequestsDbHandler
        dbDataRequest = DataRequestsDbHandler().get_by_addHostSession(
            session, dbNode.addHostSession)
        if dbDataRequest:
            data = dbDataRequest.request
    except Exception as e:
        # NOTE(review): broad silent swallow ('e' unused) — presumably
        # deliberate best-effort, but consider logging the failure.
        pass

    # Build the installer FQDN as seen from the node's provisioning
    # network (suffix derived from the hardware profile's first NIC),
    # falling back to hostname + DNS zone when there is no NIC.
    if dbNode.hardwareprofile.nics:
        privateInstallerFQDN = '%s%s%s' % (
            primaryInstallerHostName,
            get_installer_hostname_suffix(
                dbNode.hardwareprofile.nics[0],
                enable_interface_aliases=None),
            '.%s' % (dnsZone) if dnsZone else '')
    else:
        privateInstallerFQDN = '%s%s' % (
            primaryInstallerHostName,
            '.%s' % (dnsZone) if dnsZone else '')

    if not bInstaller and dbNode.hardwareprofile.location == 'local':
        # If the hardware profile does not have an associated provisioning
        # NIC, use the public installer FQDN by default. This can happen if
        # the user has added their own "public" nodes to a local hardware
        # profile.
        if not dbNode.hardwareprofile.nics:
            installerHostName = publicInstallerFQDN
        else:
            installerHostName = privateInstallerFQDN
    else:
        # If the specified node is the installer itself or a node
        # accessing the installer through it's public interface, use the
        # public host name.
        installerHostName = publicInstallerFQDN

    # puppet_class -> args-dict mapping for the 'classes' ENC section
    puppet_classes = {}

    # kits that contribute repository entries below
    enabledKits = set()

    if dbNode.softwareprofile:
        for dbComponent in dbNode.softwareprofile.components:
            if not dbComponent.kit.isOs:
                #
                # Load the kit and component installers
                #
                kit_spec = (
                    dbComponent.kit.name,
                    dbComponent.kit.version,
                    dbComponent.kit.iteration)

                kit_installer = get_kit_installer(kit_spec)()
                kit_installer.session = session

                _component = kit_installer.get_component_installer(
                    dbComponent.name)

                #
                # Get the puppet args for the component
                #
                try:
                    puppet_class_args = _component.run_action(
                        'get_puppet_args',
                        dbNode.softwareprofile,
                        dbNode.hardwareprofile,
                        data=data)
                    if puppet_class_args is not None:
                        puppet_classes[_component.puppet_class] = \
                            puppet_class_args
                except Exception:  # noqa pylint: disable=broad-except
                    # suppress exception if unable to get Puppet args
                    puppet_classes[_component.puppet_class] = {}
            else:
                #
                # OS kit component is omitted on installer. The installer
                # is assumed to have a pre-existing OS repository
                # configuration.
                #
                if bInstaller:
                    continue

            enabledKits.add(dbComponent.kit)

    dataDict = {}

    if puppet_classes:
        dataDict['classes'] = puppet_classes

    parametersDict = {}
    dataDict['parameters'] = parametersDict

    # software profile
    if dbNode.softwareprofile:
        parametersDict['swprofilename'] = dbNode.softwareprofile.name

    # hardware profile
    parametersDict['hwprofilename'] = dbNode.hardwareprofile.name

    # installer hostname
    parametersDict['primary_installer_hostname'] = installerHostName

    # Local repos directory
    repodir = os.path.join(_cm.getDepotDir(), 'kits')

    # Build YUM repository entries only if we have kits associated with
    # the software profile.
    if enabledKits:
        # Nodes fetch over HTTP; the installer itself reads from disk.
        repourl = _cm.getIntWebRootUrl(installerHostName) + '/repos' \
            if not bInstaller else 'file://{0}'.format(repodir)

        repo_type = None

        if dbNode.softwareprofile.os.family.name == 'rhel':
            repo_type = 'yum'
        # elif dbNode.softwareprofile.os.family == 'ubuntu':
        #     repo_type = 'apt'

        if repo_type:
            # Only add 'repos' entries for supported operating system
            # families.
            repos_dict = {}

            for kit in enabledKits:
                if kit.isOs:
                    verstr = str(kit.version)
                    arch = kit.components[0].os[0].arch
                else:
                    verstr = '%s-%s' % (kit.version, kit.iteration)
                    arch = 'noarch'

                # Prefer an explicit kit source URL configured on the
                # software profile; the for/else falls through to the
                # locally-hosted repository path otherwise.
                for dbKitSource in dbNode.softwareprofile.kitsources:
                    if dbKitSource in kit.sources:
                        baseurl = dbKitSource.url
                        break
                else:
                    subpath = '%s/%s/%s' % (kit.name, verstr, arch)

                    # Skip non-OS kits whose local repo metadata does not
                    # exist yet (repository not created/populated)
                    if not kit.isOs and not os.path.exists(
                            os.path.join(repodir, subpath,
                                         'repodata/repomd.xml')):
                        continue

                    baseurl = '%s/%s' % (repourl, subpath)

                    # [TODO] temporary workaround for handling RHEL media
                    # path.
                    #
                    # This code is duplicated from tortuga.boot.distro
                    #
                    # NOTE(review): 'subpath' is modified AFTER 'baseurl'
                    # was built from it and is not read again, so this
                    # append appears to have no effect — confirm whether
                    # it should precede the 'baseurl' assignment.
                    if kit.isOs and \
                            dbNode.softwareprofile.os.name == 'rhel' and \
                            dbNode.softwareprofile.os.family.version != '7':
                        subpath += '/Server'

                if repo_type == 'yum':
                    # Higher cost de-prioritizes remote repos in yum
                    if dbNode.hardwareprofile.location == 'remote':
                        cost = 1200
                    else:
                        cost = 1000

                    repos_dict['uc-kit-%s' % (kit.name)] = {
                        'type': repo_type,
                        'baseurl': baseurl,
                        'cost': cost,
                    }

            if repos_dict:
                parametersDict['repos'] = repos_dict

    # Enable '3rdparty' repo
    if dbNode.softwareprofile:
        third_party_repo_subpath = '3rdparty/%s/%s/%s' % (
            dbNode.softwareprofile.os.family.name,
            dbNode.softwareprofile.os.family.version,
            dbNode.softwareprofile.os.arch)

        local_repos_path = os.path.join(repodir, third_party_repo_subpath)

        # Check for existence of repository metadata to validate existence
        # NOTE(review): 'repourl' below is only bound when enabledKits is
        # non-empty (guarded by the same condition), hence the
        # 'enabledKits and ...' test here.
        if enabledKits and os.path.exists(
                os.path.join(local_repos_path, 'repodata', 'repomd.xml')):
            third_party_repo_dict = {
                'tortuga-third-party': {
                    'type': 'yum',
                    'baseurl': os.path.join(repourl,
                                            third_party_repo_subpath),
                },
            }

            if 'repos' not in parametersDict:
                parametersDict['repos'] = third_party_repo_dict
            else:
                parametersDict['repos'] = dict(
                    list(parametersDict['repos'].items()) +
                    list(third_party_repo_dict.items()))

    # environment
    dataDict['environment'] = 'production'

    # Emit the ENC document on stdout for Puppet to consume
    sys.stdout.write(
        yaml.safe_dump(dataDict, default_flow_style=False,
                       explicit_start=True))