# Example 1
def getAllNfsServerIpAddresses(ctx):
    """ Get all IPv4 addresses of the NFS server.

    Args:
        ctx: global context providing configuration (ctx.cf) and
             credentials (ctx.cr)

    Returns:
        str: space separated IPv4 addresses as printed by 'hostname -I'
    """
    cmdSsh = CmdSsh(ctx, ctx.cf.nfs.host.name, ctx.cr.nfs.user)
    # Get all ip adresses (v4) of the NFS server
    ipAddrList = cmdSsh.run('hostname -I').out
    logging.debug(f'IP addresses of {ctx.cf.nfs.host.name}: [{ipAddrList}]')
    # .out is already a string - no need to wrap it in another f-string
    return ipAddrList
# Example 2
    def __init__(self, ctx):
        """ Discover all overlays on the NFS server.

        Runs a long listing over the overlay base directory on the NFS
        host and creates one Overlay object per matching entry.
        """
        cmdSsh = CmdSsh(ctx, ctx.cf.nfs.host.name, ctx.cr.nfs.user)
        lssCmd = f"ls -ladtr --time-style=long-iso {ctx.cf.nfs.bases.overlay}/*-*-*-*-*"

        self._overlays = []

        # rc 1 and 2 are tolerated: 'ls' returns them when no overlay exists
        for line in cmdSsh.run(lssCmd, rcOk=(0, 1, 2)).out.split('\n'):
            # Skip empty lines ("line == ''" is already covered by "not line")
            if not line:
                continue
            (_d1, _d2, _d3, _d4, _d5, creationDate, creationTime,
             file) = line.split()
            self._overlays.append(
                Overlay(ctx, os.path.basename(file), creationDate,
                        creationTime))
class Builder():
    """ Build container images """

    # pylint: disable=too-many-instance-attributes

    def __init__(self, ctx):
        """ Store the context; ssh/copy connections are created lazily """
        self._ctx = ctx
        self._host = None
        self._user = None
        self._cmdShell = CmdShell()
        self._cmdSsh = None
        self._remoteCopy = None
        # Both of the following must be set by the derived flavor specific class
        self._flavor = None
        self._description = None

    def buildImage(self, sidU, host, user):
        """ Build image

        Args:
            sidU: SAP system ID in upper case
            host: reference system host the build content is copied from
            user: user for the ssh connection to host
        """

        # pylint: disable=too-many-locals,too-many-statements

        repoRoot = self._ctx.cf.build.repo.root
        buildTmpRoot = self._ctx.ar.temp_root
        buildDir = self._ctx.ar.build_directory
        keepFiles = self._ctx.ar.keep_files

        # Initialize ssh connection

        if not self._cmdSsh or host != self._host or user != self._user:
            # Initialize only if not yet initialized or if connection parameters have changed
            if self._cmdSsh:
                del self._cmdSsh
            self._cmdSsh = CmdSsh(self._ctx, host, user)

        # Initialize remote copy connection

        if not self._remoteCopy or host != self._host or user != self._user:
            # Initialize only if not yet initialized or if connection parameters have changed
            if self._remoteCopy:
                del self._remoteCopy
            self._remoteCopy = RemoteCopy(self._ctx, host, user)

        self._host = host
        self._user = user

        # System ID

        sidL = sidU.lower()

        logging.debug(f"sidU: '{sidU}'")
        logging.debug(f"sidL: '{sidL}'")

        # Directories

        dirs = types.SimpleNamespace()
        dirs.repoRoot = repoRoot
        if buildDir and len(buildDir) != 0:
            dirs.build = buildDir
        else:
            # No build directory supplied - create a temporary one
            self._cmdShell.run(f'mkdir -p "{buildTmpRoot}"')
            dirs.build = self._cmdShell.run(
                f'mktemp -d -p "{buildTmpRoot}" '
                f'-t soos-build-{self._flavor}.XXXXXXXXXX').out
        dirs.usrSapReal = self._getUsrSapReal()
        dirs.sapmnt = self._ctx.cf.refsys.nws4.base.sapmnt

        self._setDirsFlavor(sidU, dirs)

        logging.debug(f"dirs: '{dirs}'")

        # Image properties

        image = types.SimpleNamespace()
        image.name = f'localhost/soos-{sidL}'
        image.version = 'latest'
        image.tag = f'{image.name}:{image.version}'
        image.date = date.today().strftime('%Y-%m-%d')
        image.description = self._description  # Must be set by derived class
        with pushd(dirs.repoRoot):
            image.commit = self._cmdShell.run('git log --pretty="%H" -1').out
            image.branch = self._cmdShell.run(
                'git rev-parse --abbrev-ref HEAD').out

        logging.debug(f"image: '{image}'")

        # OS user properties

        (sapadm, sidadm, sapsysGid) = self._getOsUserProperties(sidL)

        logging.debug(f"sapadm   : '{sapadm}'")
        logging.debug(f"sidadm   : '{sidadm}'")
        logging.debug(f"sapsysGid: '{sapsysGid}'")

        # Misc

        buildCmd = 'podman'
        # NOTE(review): this yields e.g. 'linuxx86_64' (no separator between
        # 'linux' and the machine type) - presumably matched elsewhere; confirm
        remoteOs = 'linux' + self._cmdSsh.run('uname -m').out

        # Start build process

        with tempfile.TemporaryDirectory() as dirs.tmp:
            logging.debug(f"Created temporary directory '{dirs.tmp}'")
            self._cleanupAtStart(dirs, keepFiles)
            self._genBuildContext(sidU, dirs, sapadm, sidadm, sapsysGid, host,
                                  remoteOs)
            containerfile = self._genContainerfile(sidU, dirs, image, sapadm,
                                                   sidadm, sapsysGid)
            self._buildImage(buildCmd, dirs, image, containerfile)
            self._cleanupAtEnd(dirs)

    def _getUsrSapReal(self):
        """ Return the real directory behind /usr/sap on the remote host """
        # Check whether /usr/sap is a real directory or a symlink to another directory
        usrSapReal = self._cmdSsh.run('readlink /usr/sap').out
        if len(usrSapReal) != 0:
            logging.info(
                f"Detected that '/usr/sap' is a symbolic link to '{usrSapReal}'"
            )
        else:
            # readlink printed nothing -> /usr/sap is a real directory
            usrSapReal = '/usr/sap'
        logging.debug(f"usrSapReal: '{usrSapReal}'")
        return usrSapReal

    def _setDirsFlavor(self, sidU, dirs):
        """ Set flavor specific directories (abstract) """
        # Flavor specific directories
        # pylint: disable=unused-argument
        fail(
            'This function must be overwritten by derived flavor specific builder class.'
        )

    def _getOsUserProperties(self, sidL):
        """ Return (sapadm, sidadm, sapsysGid) read from remote /etc/passwd """
        # Get properties of sapadm and <sid>adm from remote host /etc/passwd

        sapadm = types.SimpleNamespace()
        # /etc/passwd fields: name:password:uid:gid:comment:home:shell
        (_d1, _d2, sapadm.uid, sapsysGid, sapadm.comment, sapadm.home,
         sapadm.shell
         ) = self._cmdSsh.run('grep "^sapadm:" /etc/passwd').out.split(':')

        sidadm = types.SimpleNamespace()
        (_d1, _d2, sidadm.uid, _d4, sidadm.comment, sidadm.home, sidadm.shell
         ) = self._cmdSsh.run(f'grep "^{sidL}adm:" /etc/passwd').out.split(':')

        logging.debug(f'Returning {sapadm}, {sidadm}, {sapsysGid}')

        return (sapadm, sidadm, sapsysGid)

    def _cleanupAtStart(self, dirs, keepFiles):
        """ Empty the build directory unless asked to keep its files """
        # Remove previously copied files if not explicitly asked to keep them
        if not keepFiles:
            logging.info(
                f"##### Cleaning up build directoy '{dirs.build}' #####")
            with pushd(dirs.build):
                # The three globs together match hidden and non-hidden entries
                self._cmdShell.run('rm -rf ..?* .[!.]* *')

    def _genBuildContext(self, sidU, dirs, sapadm, sidadm, sapsysGid, host,
                         remoteOs):
        """ Generate the podman build context """
        # Generate podman build context
        # pylint: disable=too-many-arguments
        filterFilePath = f'{dirs.tmp}/rsync-filter'
        logging.debug(f"filterFilePath: {filterFilePath}")
        try:
            # pylint: disable=invalid-name, unspecified-encoding
            with open(filterFilePath, 'w') as fh:
                print(self._getRsyncFilter(sidU, dirs, remoteOs), file=fh)
        except IOError:
            fail(f"Error writing to file {filterFilePath}")

        self._genBuildContextFlavor(sidU, dirs, sapadm, sidadm, sapsysGid,
                                    host, filterFilePath)

    def _getRsyncFilter(self, sidU, dirs, remoteOs):
        """ Return the rsync filter for selective copying (abstract) """
        # Get filter for selective copy depending on flavor
        # pylint: disable=unused-argument
        fail(
            'This function must be overwritten by derived flavor specific builder class.'
        )

    def _genBuildContextFlavor(self, sidU, dirs, sapadm, sidadm, sapsysGid,
                               host, filterFilePath):
        """ Flavor dependent build context generation (abstract) """
        # Flavor dependent actions for build context generation
        # pylint: disable=unused-argument,too-many-arguments
        fail(
            'This function must be overwritten by derived flavor specific builder class.'
        )

    def _genContainerfile(self, sidU, dirs, image, sapadm, sidadm, sapsysGid):
        """ Generate the containerfile from the template; return its path """
        # Generate containerfile from template depending on flavor
        # MUST RUN AFTER BUILD CONTEXT SETUP
        # pylint: disable=too-many-arguments
        logging.info("##### Generating Containerfile #####")

        sidL = sidU.lower()

        # Common parameters
        if dirs.usrSapReal != '/usr/sap':
            usrSapLinkCmd = f'ln -s {dirs.usrSapReal} /usr/sap'
        else:
            # 'true' is a no-op when executed inside the container build
            usrSapLinkCmd = 'true'

        # get optional packages
        packages = getattr(self._ctx.cf.images, self._flavor).packages
        pkgParams = self._getOptionalPackageParams(packages, dirs)

        # Placeholder values substituted into the containerfile template
        params = {
            'IMAGE_BRANCH': image.branch,
            'IMAGE_COMMIT': image.commit,
            'IMAGE_DATE': image.date,
            'IMAGE_DESCRIPTION': image.description,
            'IMAGE_VERSION': image.version,
            'SAPADM_COMMENT': sapadm.comment,
            'SAPADM_HOME': sapadm.home,
            'SAPADM_SHELL': sapadm.shell,
            'SAPADM_UID': sapadm.uid,
            'SAPMNT': dirs.sapmnt,
            'SAPSYS_GID': sapsysGid,
            'sid': sidL,
            'SID': sidU,
            'SIDADM_COMMENT': sidadm.comment,
            'SIDADM_HOME': sidadm.home,
            'SIDADM_SHELL': sidadm.shell,
            'SIDADM_UID': sidadm.uid,
            'USR_SAP_REAL': dirs.usrSapReal,
            'USR_SAP_LINK_CMD': usrSapLinkCmd,
            'INSTALL_OPT_PACKAGES': pkgParams.installOptPackagesDnf,
            'COPY_OPT_PACKAGE_FILES': pkgParams.copyOptPackageFiles,
            'INSTALL_OPT_PACKAGE_FILES': pkgParams.installOptPackageFiles
        }

        params.update(self._getContainerfileParams(sidU, dirs))
        containerfile = f'{dirs.tmp}/containerfile'
        template = f'{dirs.repoRoot}/openshift/images/{self._flavor}/containerfile.template'
        genFileFromTemplate(template, containerfile, params)
        try:
            # pylint: disable=invalid-name, unspecified-encoding
            with open(containerfile) as fh:
                logging.debug(
                    f"Contents of '{containerfile}': >>>\n{fh.read()}<<<")
        except IOError:
            fail(f"Error reading from {containerfile}")
        return containerfile

    def _getContainerfileParams(self, sidU, dirs):
        """ Return flavor specific containerfile parameters (abstract) """
        # Non-common containerfile template parameters depending on flavor
        # pylint: disable=unused-argument
        fail(
            'This function must be overwritten by derived flavor specific builder class.'
        )

    def _buildImage(self, buildCmd, dirs, image, containerfile):
        """ Run the actual image build inside the build directory """
        # Build image
        # MUST RUN AFTER BUILD CONTEXT SETUP
        # pylint: disable=no-self-use
        logging.info("##### Building image #####")
        with pushd(dirs.build):
            self._cmdShell.run(
                f'{buildCmd} build -t {image.tag} -f "{containerfile}" .')

    def _getOptionalPackageParams(self, packages, dirs):
        """ Return containerfile snippets for installing optional packages """
        # Check if optional packages must be installed
        # and set them
        pkgParams = types.SimpleNamespace()
        pkgParams.installOptPackagesDnf = ''
        pkgParams.copyOptPackageFiles = ''
        pkgParams.installOptPackageFiles = ''

        if len(packages) > 0:
            self._addDependencies(packages, pkgParams)
            self._addDnfInstallablePackages(packages, pkgParams)
            self._addRpmPackages(packages, pkgParams, dirs)
        return pkgParams

    def _addDependencies(self, packages, pkgParams):
        """ Append dnf install commands for package dependencies """
        # Set dependencies for optional packages
        # The first snippet needs the 'RUN' prefix; later ones are chained
        firstRun = pkgParams.installOptPackagesDnf == ""

        for package in packages:
            if len(package.dependencies) > 0:
                if firstRun:
                    pkgParams.installOptPackagesDnf = 'RUN  dnf -y install'
                    firstRun = False
                else:
                    pkgParams.installOptPackagesDnf += ' && \\' + '\n'
                    pkgParams.installOptPackagesDnf += '     dnf -y install'

                for dependency in package.dependencies:
                    logging.debug(f"Adding dependency '{dependency}' " +
                                  f"for package '{package.packageName}'")
                    pkgParams.installOptPackagesDnf += f' {dependency}'

    def _addDnfInstallablePackages(self, packages, pkgParams):
        """ Append dnf install commands for dnf-installable packages """
        # set all packages to be installed using dnf
        firstRun = pkgParams.installOptPackagesDnf == ""
        for package in packages:
            if package.dnfInstallable:
                logging.debug(
                    f'package {package.packageName} installable via dnf install'
                )

                if firstRun:
                    pkgParams.installOptPackagesDnf = 'RUN  dnf -y install'
                    firstRun = False
                else:
                    pkgParams.installOptPackagesDnf += ' && \\' + '\n'
                    pkgParams.installOptPackagesDnf += '     dnf -y install'
                if package.repository != "":
                    pkgParams.installOptPackagesDnf += f' --enablerepo={package.repository}'
                    logging.debug(
                        f'enabling repository    : {package.repository}')
                pkgParams.installOptPackagesDnf += f' {package.packageName}'

    def _addRpmPackages(self, packages, pkgParams, dirs):
        """ Append COPY/RUN commands for packages shipped as rpm files """
        # set all packages which must be copied and installed using rpm
        firstRun = pkgParams.copyOptPackageFiles == ""
        for package in packages:
            if not package.dnfInstallable:
                logging.debug(
                    f'package {package.packageName} must be installed via rpm')
                if firstRun:
                    pkgParams.copyOptPackageFiles = 'COPY '
                    pkgParams.installOptPackageFiles = 'RUN  '
                    firstRun = False
                else:
                    pkgParams.copyOptPackageFiles += ' && \\' + '\n' + '     '
                    pkgParams.installOptPackageFiles += ' && \\' + '\n' + '     '

                try:
                    # The rpm file is removed again after installation to
                    # keep the image small
                    rpmFileName = getRpmFileForPackage(package.packageName,
                                                       dirs.defaultPackagesDir)
                    pkgParams.copyOptPackageFiles += f'{dirs.defaultPackagesDir}'
                    pkgParams.copyOptPackageFiles += f'/{rpmFileName} / '
                    pkgParams.installOptPackageFiles += f'rpm -i /{rpmFileName} && \\' + '\n'
                    pkgParams.installOptPackageFiles += f'     rm /{rpmFileName}'
                except RpmFileNotFoundException as exp:
                    fail(exp.errorText)

    def _cleanupAtEnd(self, dirs):
        """ Cleanup after image build (removal currently disabled) """
        # Cleanup after image build
        with pushd(dirs.repoRoot):
            # self._cmdShell.run(f'\\rm -rf {dirs.build}')
            pass
class Config(ConfigBase):
    """ Configuration management """
    def __init__(self, ctx, create=False, failOnDiscoveryError=True):
        """ Load configuration, using a cache file to avoid re-discovery

        Args:
            ctx: global context
            create: if True, only create the configuration from its template
            failOnDiscoveryError: abort (True) or warn (False) on discovery errors
        """
        configFile = ctx.ar.config_file

        self._noFileMsg = f"Configuration file '{configFile}' does not exist"

        self._cmdSshNws4 = None  # Set in _discoverNws4()
        self._cmdSshHdb = None  # Set in _discoverHdb()

        super().__init__(ctx, './config.yaml.template', configFile, create)

        if create:
            # Creation only - no discovery needed
            return

        configCacheFile = f'{configFile}.cache'
        configCacheTimeout = 600  # seconds

        configMtime = self._getMtime(configFile)  # seconds since the Epoch
        configCacheMtime = self._getMtime(configCacheFile)

        # self._config = ConfigBase.cleanup(self._getConfigFromFile(configFile))
        self._config = self.getObj()
        configCached = self._read(configCacheFile)

        # Re-run discovery if the cache is missing, stale or invalid;
        # otherwise use the cached configuration as-is
        if not configCached:
            # Config cache file does not exist -> discover
            logging.debug(
                f"Config cache file '{configCacheFile}' does not exist")
            self._discoverAndCache(configCacheFile, configCacheTimeout,
                                   failOnDiscoveryError)

        elif self._referenceSystemChanged(configCached):
            # Reference SAP system changed
            self._discoverAndCache(configCacheFile, configCacheTimeout,
                                   failOnDiscoveryError)

        elif configMtime > configCacheMtime:
            # Original config was changed after config was cached
            logging.debug(f"Configuration file '{configFile}' is newer"
                          f" than cached configuration '{configCacheFile}'")
            self._discoverAndCache(configCacheFile, configCacheTimeout,
                                   failOnDiscoveryError)

        elif float(configCached['expiryTime']) < self._getCurrentTime():
            # Cached config is expired
            logging.debug('Cached configuration is expired')
            self._discoverAndCache(configCacheFile, configCacheTimeout,
                                   failOnDiscoveryError)

        else:
            logging.debug(
                f"Using cached configuration from '{configCacheFile}'")
            self._config = configCached

        logging.debug(f'self._config >>>{self._config}<<<')

    # Public methods

    def getFull(self):
        """ Get full configuration (including discovered parts) as nested namespace """
        logging.debug(f'self._config >>>{self._config}<<<')
        # NOTE(review): 'cleaned' is only computed for the debug log below;
        # the uncleaned config is what gets returned
        cleaned = ConfigBase.cleanup(self._config)
        logging.debug(f'cleaned >>>{cleaned}<<<')
        # return objToNestedNs(ConfigBase.cleanup(self._config))
        return objToNestedNs(self._config)

    def getImageFlavors(self):
        """ Get image flavors """
        return [*self._config['images']]

    def getContainerFlavors(self):
        """ Get container flavors """
        return list(self._config['ocp']['containers'].keys())

    # Private methods

    def _getCurrentTime(self):
        # Wall-clock time in seconds since the Epoch, wrapped for testability;
        # signature had a stray trailing comma ('self, ') which is removed
        return time.time()

    def _getMtime(self, file):
        # Modification time of 'file' in seconds since the Epoch, -1 if missing
        path = pathlib.Path(file)
        if not path.exists():
            return -1
        return path.stat().st_mtime

    def _referenceSystemChanged(self, configCached):
        """ Check whether configured reference system differs from cached reference system """

        configuredHost = self._config['refsys']['nws4']['host']['name']
        configuredSid = self._config['refsys']['nws4']['sid'].upper()
        cachedHost = configCached['refsys']['nws4']['host']['name']
        cachedSid = configCached['refsys']['nws4']['sidU']

        # Either a differing host name or a differing SID counts as a change
        systemChanged = (configuredHost != cachedHost
                         or configuredSid != cachedSid)

        if systemChanged:
            logging.debug(
                f"Configured reference system '{configuredSid}@{configuredHost}'"
                f" differs from cached reference system '{cachedSid}@{cachedHost}'"
            )

        return systemChanged

    def _discoverAndCache(self, configCacheFile, configCacheTimeout,
                          failOnDiscoveryError):
        """ Run discovery and write the result to the cache file

        On discovery errors either aborts (failOnDiscoveryError=True) or
        warns and continues with a possibly incomplete configuration.
        """
        logging.debug('Running configuration discovery')

        # self._checkRequiredOptional()

        try:
            self._discover()
            # Stored as string; compared via float() when the cache is read
            self._config['expiryTime'] = str(self._getCurrentTime() +
                                             configCacheTimeout)

            logging.debug(f"Writing config to cache file '{configCacheFile}'")

            # pylint: disable=unspecified-encoding
            with open(configCacheFile, 'w') as ccfh:
                yaml.dump(self._config, stream=ccfh)
        except _DiscoveryError as derr:
            # NOTE(review): 'occured' in the message below is a typo
            # ('occurred') - left unchanged since it is a runtime string
            message = '\nThe following problem occured during configuration discovery:\n\n'
            message += f"{'-'*65}\n"
            message += f'{derr}\n'
            message += f"{'-'*65}\n"
            if failOnDiscoveryError:
                fail(message)
            else:
                # Best effort: keep going without writing the cache file
                message += '\nThe configuration cache file was not written. Proceeding with\n'
                message += 'possibly incomplete configuration. This may lead to runtime\n'
                message += 'errors. Check your configuration file\n\n'
                message += f"    {self._instanceFile }\n\n"
                message += 'and correct the error.\n\n'
                warn(message)

    def _discover(self):
        # Run all discovery steps; order matters (nws4 before hdb, ocp last)
        self._config['images'] = {}
        for discoverStep in (self._discoverNfs, self._discoverInit,
                             self._discoverNws4, self._discoverHdb,
                             self._discoverOcp):
            discoverStep()

    def _getInstno(self, cmdSsh, sidU, instPrefix, host):
        # Extract the instance number from the SAPSYSTEM entry of the
        # matching instance profile on the remote host
        profilePath = f'/usr/sap/{sidU}/SYS/profile/{sidU}_{instPrefix}*_{host}'
        result = cmdSsh.run(f'grep -E "SAPSYSTEM +" {profilePath}')
        if result.rc > 0:
            raise _DiscoveryError(
                f"Could not discover instance number of {sidU} on host {host}\n"
                f"The profile"
                f" {profilePath}"
                f" might not exist.\n"
                f"Check if the parameters you specified for your SAP reference"
                f" system are valid.")

        # Profile line looks like 'SAPSYSTEM = <instno>' -> third token
        firstLine = result.out.split('\n')[0]
        return firstLine.split()[2]

    def _getTimeZone(self, cmdSsh):
        # Extract the time zone from 'timedatectl' output on the remote host
        tzCmd = 'timedatectl | grep  Time | cut -d ":" -f2 | cut -d " " -f2'
        return cmdSsh.run(tzCmd).out

    def _getHostByName(self, host):
        # Resolve 'host' to an IPv4 address, raising _DiscoveryError on failure.
        # socket.gethostbyname() raises socket.gaierror/herror (subclasses of
        # OSError) or UnicodeError for over-long names; catching exactly those
        # instead of the previous bare 'Exception' avoids masking real bugs.
        try:
            ipAddr = socket.gethostbyname(host)
        except (OSError, UnicodeError) as nameResolutionError:
            raise _DiscoveryError(
                f"Cannot resolve IP address for '{host}'\n"
                f"Ensure that hostname '{host}' is correct,\n"
                f"and check the name resolution record of it."
            ) from nameResolutionError
        return ipAddr

    def _getImageNames(self, flavor):
        # Derive the short, local, and OCP registry image names for a flavor
        short = ('soos-init' if flavor == 'init'
                 else f'soos-{self._config["refsys"][flavor]["sidL"]}')

        local = f'localhost/{short}:latest'

        registry = f'default-route-openshift-image-registry.apps.{self._config["ocp"]["domain"]}'
        ocp = f'{registry}/{self._config["ocp"]["project"]}/{short}:latest'

        return {'short': short, 'local': local, 'ocp': ocp}

    def _getContainerName(self, containerFlavor):
        # init/hdb containers reuse the image short name;
        # di/ascs append the flavor to the nws4 image short name
        if containerFlavor in ('init', 'hdb'):
            return self._config['images'][containerFlavor]['names']['short']

        if containerFlavor in ('di', 'ascs'):
            nws4Short = self._config['images']['nws4']['names']['short']
            return f"{nws4Short}-{containerFlavor}"

        raise _DiscoveryError(
            f"Unknown container flavor '{containerFlavor}'")

    def _discoverInit(self):
        # Only the image names need to be discovered for the init flavor
        initNames = self._getImageNames('init')
        self._config['images']['init'] = {'names': initNames}

    def _discoverNws4(self):
        """ Discover the NWS4 reference system properties

        Resolves the host, validates the <sid>adm user against the
        credentials file, opens the ssh connection (self._cmdSshNws4),
        and fills self._config['refsys']['nws4'] and
        self._config['images']['nws4'].
        """

        # Host and <sid>adm user

        host = self._config['refsys']['nws4']['host']['name']
        sidL = self._config['refsys']['nws4']['sid'].lower()
        sidU = self._config['refsys']['nws4']['sid'].upper()

        # Replace the single 'sid' key by explicit lower/upper case variants
        self._config['refsys']['nws4']['sidL'] = sidL
        self._config['refsys']['nws4']['sidU'] = sidU
        del self._config['refsys']['nws4']['sid']

        user = self._ctx.cr.refsys.nws4.sidadm
        if user.name != f'{sidL}adm':
            raise _DiscoveryError(
                f"Mismatch between credentials file nws4 user name '{user.name}'\n"
                f"and derived configuration file nws4 user name '{sidL}adm.'\n"
                f"Check credentials file parameter 'refsys.nws4.sidadm.name' and\n"
                f"configuration file parameter 'refsys.nws4.sid' and correct the\n"
                f"wrong value.")

        self._config['refsys']['nws4']['host']['ip'] = self._getHostByName(
            host)

        self._cmdSshNws4 = CmdSsh(self._ctx, host, user, check=False)
        if self._cmdSshNws4.passwordNeeded():
            print(f"Enter password for user {user.name} running on {host}")
        # 'true' is a cheap probe to verify the ssh connection works
        res = self._cmdSshNws4.run('true')

        if res.rc != 0:
            msg = self._cmdSshNws4.formatSshError(res, host, user)
            raise _DiscoveryError(
                f"{msg}\n\n"
                "In addition neither the\n"
                "   - SAP SID of the HANA instance\n"
                "nor the\n"
                "   - the hostname on which the HANA instance is running\n"
                "can be discovered\n")

        # User and group ID of <sid>adm

        (uid, gid) = self._cmdSshNws4.run(
            f'grep "{user.name}" /etc/passwd').out.split(':')[2:4]
        self._config['refsys']['nws4']['sidadm'] = {'uid': uid, 'gid': gid}

        # Time zone

        self._config['refsys']['nws4']['timezone'] = self._getTimeZone(
            self._cmdSshNws4)

        # sapmnt base directory

        self._config['refsys']['nws4']['base'] = {}
        self._config['refsys']['nws4']['base']['sapmnt'] = self._getSapmntDir(
            sidU)

        # SAPFQDN
        defaultProfile = self._getDefaultProfile(sidU)
        result = self._cmdSshNws4.run(f'grep "^SAPFQDN" {defaultProfile}')

        if result.rc > 0:
            # SAPFQDN is optional - fall back to an empty value
            logging.warning("Could not discover SAPFQDN "
                            f"from {defaultProfile}")
            self._config['refsys']['nws4']['sapfqdn'] = ""
        else:
            self._config['refsys']['nws4']['sapfqdn'] = result.out.split(
                '\n')[0].split()[2]

        # Instance specific parameters

        ascsInstno = self._getInstno(self._cmdSshNws4, sidU, 'ASCS', host)
        diInstno = self._getInstno(self._cmdSshNws4, sidU, 'D', host)

        self._config['refsys']['nws4']['ascs'] = {
            # Instance number
            'instno': ascsInstno,

            # Default profile name
            'profile': f'{sidU}_ASCS{ascsInstno}_{host}'
        }

        self._config['refsys']['nws4']['di'] = {
            # Instance number
            'instno': diInstno,

            # Default profile name
            'profile': f'{sidU}_D{diInstno}_{host}'
        }

        # Image names

        self._config['images']['nws4'] = {'names': self._getImageNames('nws4')}

        # Set optional package names to be installed

        self._config['images']['nws4']['packages'] = []

    def _discoverHdb(self):
        """ Discover the HANA DB reference system properties

        Derives host and SID from the NWS4 default profile, validates the
        <sid>adm user, opens the ssh connection (self._cmdSshHdb), and
        fills self._config['refsys']['hdb'] and self._config['images']['hdb'].
        Must run after _discoverNws4().
        """
        self._config['refsys']['hdb'] = {}

        host = self._discoverHdbHost()
        sid = self._discoverHdbSid()
        sidL = sid.lower()
        sidU = sid.upper()

        # self._config['refsys']['hdb']['sid']  = sid
        self._config['refsys']['hdb']['sidL'] = sidL
        self._config['refsys']['hdb']['sidU'] = sidU

        user = self._ctx.cr.refsys.hdb.sidadm
        if user.name != f'{sidL}adm':
            raise _DiscoveryError(
                f"Mismatch between credentials file hdb user name '{user.name}'\n"
                f"and derived configuration file hdb user name '{sidL}adm.'\n"
                f"Check credentials file parameter 'refsys.hdb.sidadm.name' and\n"
                f"configuration file parameter 'refsys.hdb.sid' and correct the\n"
                f"wrong value.")

        self._config['refsys']['hdb']['host'] = {
            'name': host,
            'ip': self._getHostByName(host)
        }

        self._cmdSshHdb = CmdSsh(self._ctx, host, user, check=False)
        if self._cmdSshHdb.passwordNeeded():
            print(f"Enter password for user {user.name} running on {host}")
        # 'true' is a cheap probe to verify the ssh connection works
        res = self._cmdSshHdb.run('true')

        if res.rc != 0:
            msg = self._cmdSshHdb.formatSshError(res, host, user)
            raise _DiscoveryError(f"{msg}\n\n")

        # User and group ID of <sid>adm
        # Must be performed on HDB host!

        result = self._cmdSshHdb.run(f'grep "{user.name}" /etc/passwd')
        if result.rc > 0:
            raise _DiscoveryError(
                f"Could not discover uid and gid for user {user.name}.")
        (uid, gid) = result.out.split(':')[2:4]
        self._config['refsys']['hdb']['sidadm'] = {'uid': uid, 'gid': gid}

        # Time zone

        self._config['refsys']['hdb']['timezone'] = self._getTimeZone(
            self._cmdSshHdb)

        # Instance specific parameters
        # Must be performed on HDB host!

        # Instance number

        self._config['refsys']['hdb']['instno'] = self._getInstno(
            self._cmdSshHdb, sidU, 'HDB', host)

        # HDB host rename

        nws4HostName = self._config['refsys']['nws4']['host']['name']
        hdbHostName = self._config['refsys']['hdb']['host']['name']

        # Rename is only needed when HDB runs on a different host than NWS4
        if nws4HostName == hdbHostName:
            self._config['refsys']['hdb']['rename'] = 'no'
        else:
            self._config['refsys']['hdb']['rename'] = 'yes'

        # HDB base directories

        self._config['refsys']['hdb']['base'] = {}
        self._config['refsys']['hdb']['base'][
            'shared'] = self._discoverHdbBaseShared(sidU)
        self._config['refsys']['hdb']['base'][
            'data'] = self._discoverHdbBaseData(sidU)
        self._config['refsys']['hdb']['base'][
            'log'] = self._discoverHdbBaseLog(sidU)

        # Image names

        self._config['images']['hdb'] = {'names': self._getImageNames('hdb')}

        # Set optional packages
        packages = self._discoverHdbOptPkgs()
        logging.debug(f'Optional packages for hdb: {packages}')
        self._config['images']['hdb']['packages'] = packages

    def _discoverNfs(self):
        # Fall back to the OCP helper host if no NFS host is configured,
        # then resolve and store its IP address
        nfsHost = self._config['nfs']['host']
        if not nfsHost['name']:
            nfsHost['name'] = self._config['ocp']['helper']['host']['name']
        nfsHost['ip'] = self._getHostByName(nfsHost['name'])

    def _discoverOcp(self):
        """ Discover OpenShift related settings

        Sets service account names, container names and the memory
        requests/limits for the HDB and DI containers. Must run after
        _discoverNws4() and _discoverHdb().
        """
        project = self._config['ocp']['project']

        hostIp = self._getHostByName(
            self._config['ocp']['helper']['host']['name'])
        self._config['ocp']['helper']['host']['ip'] = hostIp

        self._config['ocp']['sa'] = {
            'name': f'{project}-sa',
            'file': f'{project}-service-account.yaml'
        }

        # Containers
        # NOTE(review): only the 'init' entry is created here; 'hdb', 'ascs'
        # and 'di' entries are presumably supplied by the config template -
        # confirm

        self._config['ocp']['containers']['init'] = {}

        self._config['ocp']['containers']['init'][
            'name'] = self._getContainerName('init')
        self._config['ocp']['containers']['hdb'][
            'name'] = self._getContainerName('hdb')
        self._config['ocp']['containers']['ascs'][
            'name'] = self._getContainerName('ascs')
        self._config['ocp']['containers']['di'][
            'name'] = self._getContainerName('di')

        # Set requested resources for containers

        logging.debug(f'config >>>{yaml.dump(self._config)}<<<')

        # Memory for HDB container
        #
        # discovered size for HDB container:
        # size of the HANA filesystem
        # Value for both limits and requests are set to discovered size
        # if no value specified in configuration

        hdbMinMem = f'{self._discoverHdbSizeGiB()}Gi'

        logging.debug(f'config >>>{yaml.dump(self._config)}<<<')
        for kind in ('requests', 'limits'):
            res = self._config['ocp']['containers']['hdb']['resources'][kind]
            if not res['memory']:
                res['memory'] = hdbMinMem
                logging.warning(getMessage("msgL001", kind, "HDB", hdbMinMem))

        # Memory for NWS4 Dialog Instance container
        #
        # discovered size for NWS4 DI container:
        # PHYS_MEMSIZE if available in Instance Profile
        # or 10 percent of physical memory size of reference system, at least 32GiB
        # Value for both limits and requests are set to discovered size
        # if no value specified in configuration

        diMinMem = f'{self._discoverDiSizeGiB()}Gi'

        logging.debug(f'config >>>{yaml.dump(self._config)}<<<')
        for kind in ('requests', 'limits'):
            res = self._config['ocp']['containers']['di']['resources'][kind]
            if not res['memory']:
                res['memory'] = diMinMem
                logging.warning(
                    getMessage("msgL001", kind, "Dialog Instance", diMinMem))

    def _discoverHdbSid(self):
        # The HANA SID is recorded as 'dbs/hdb/dbname' in the NWS4 default profile
        defaultProfile = self._getDefaultProfile(
            self._config['refsys']['nws4']['sidU'])
        result = self._cmdSshNws4.run(f'grep dbs/hdb/dbname {defaultProfile}')
        if result.rc > 0:
            raise _DiscoveryError(
                f"Could not discover HANA SID from {defaultProfile}")
        # Profile line looks like 'dbs/hdb/dbname = <SID>'
        return result.out.split('=')[1].strip()

    def _discoverHdbHost(self):
        defaultProfile = self._getDefaultProfile(
            self._config['refsys']['nws4']['sidU'])
        cmd = f'grep SAPDBHOST {defaultProfile}'
        result = self._cmdSshNws4.run(cmd)
        if result.rc > 0:
            raise _DiscoveryError(
                f"Could not discover SAPDBHOST from {defaultProfile}")
        return result.out.split('=')[1].strip()

    def _discoverHdbBaseShared(self, sidU):
        profile = f'/usr/sap/{sidU}/SYS/profile'
        out = self._cmdSshHdb.run(f'readlink {profile}').out
        # example for out:
        # /hana/shared/SID/profile
        # after splitting it:
        # ['','hana','shared','SID','profile']
        # We ignore the last three components
        return '/'.join(out.split('/')[:-3])

    def _discoverHdbBaseData(self, sidU):
        """ Discover the base path of the HANA data volumes from global.ini """
        return self._getBasePathFromGlobalIni(sidU, 'data')

    def _discoverHdbBaseLog(self, sidU):
        """ Discover the base path of the HANA log volumes from global.ini """
        return self._getBasePathFromGlobalIni(sidU, 'log')

    def _discoverHdbSizeGiB(self):
        """ Discover storage in GiB needed for HDB content """
        sidU = self._config['refsys']['hdb']['sidU']
        dataDir = f"{self._config['refsys']['hdb']['base']['data']}/data/{sidU}"
        out = self._cmdSshHdb.run(f'du -s -B 1G {dataDir} | cut -f1').out
        return int(out) + self._ctx.cs.additionalFreeSpaceHdbGiB

    def _discoverHdbOptPkgs(self):
        """ Determine the optional HANA packages matching the DB's SPS level

        For each candidate package it is also checked whether the package's
        repository is accessible (recorded in pkg.dnfInstallable).
        """
        hdbConfig = self._config['refsys']['hdb']
        # The HANA DB version (SPS level) is obtained via the instance directory
        spsLevel = getSpsLevelHdb(self._cmdSshHdb, hdbConfig['sidU'],
                                  hdbConfig['instno'])
        logging.debug(f'HANA DB SPS Level: {spsLevel}')

        pkgs = []
        for pkg in self._ctx.cs.optionalHdbPkgs:
            if not pkg.minSpsLevel <= spsLevel < pkg.maxSpsLevel:
                continue
            logging.debug(
                f'Optional package to be installed: {pkg.packageName}')
            logging.debug(f'enabling repository: {pkg.repository}')
            pkg.dnfInstallable = isRepoAccessible(pkg.repository)
            pkgs.append(pkg)
        return pkgs

    def _discoverDiSizeGiB(self):
        """ Discover storage in GiB needed for Dialog Instance """
        memsize = self._discoverDiSizeFromInstProfileGiB()
        if memsize > 0:
            return memsize
        return max(self._discoverDiSizeFromRefHostGiB(),
                   self._ctx.cs.minMemSizeDIGiB)

    def _discoverDiSizeFromInstProfileGiB(self):
        profile = self._getInstanceProfile()
        memsizeInMiB = self._cmdSshNws4.run(
            f'grep PHYS_MEMSIZE {profile} | cut -d = -f2').out
        if not memsizeInMiB:
            return 0
        return int(memsizeInMiB) // 1024

    def _discoverDiSizeFromRefHostGiB(self):
        # Output of 'grep MemTotal /proc/meminfo' looks like:
        # MemTotal:       64819648 kB
        #
        cmd = "grep MemTotal /proc/meminfo "
        cmd += "| cut -d : -f2 "
        memsizeInKb = int(self._cmdSshNws4.run(cmd).out.split()[0])
        memsizeInGiB = memsizeInKb // 1024 // 1024

        # The size is set to 10% of MemTotal (according to SAP Settings)
        return memsizeInGiB // 10

    def _getSapmntDir(self, sidU):
        profilePath = self._cmdSshNws4.run(f'find /usr/sap/ -type l -ipath '
                                           f'"*{sidU}/SYS/profile"').out
        profileTarget = self._cmdSshNws4.run(f'readlink "{profilePath}"').out
        return profileTarget[0:profileTarget.index(f'/{sidU}/profile')]

    def _getInstanceProfile(self):
        sidU = self._config['refsys']['nws4']['sidU']
        profile = f'/usr/sap/{sidU}/SYS/profile/'
        profile += self._config['refsys']['nws4']['di']['profile']
        return profile

    def _getDefaultProfile(self, sidU):
        return f'/usr/sap/{sidU}/SYS/profile/DEFAULT.PFL'

    def _getBasePathFromGlobalIni(self, sidU, baseType):
        if baseType not in ['data', 'log']:
            raise _DiscoveryError("Internal error: wrong baseType specified")
        # There exist more than on location of the global.ini
        # They are ordered in different layers:
        # Default
        # System
        # Database
        # Host
        # The parameters are taken from top to bottom.
        # https://help.sap.com/viewer/6b94445c94ae495c83a19646e7c3fd56/2.0.04/en-US/3f1a6a7dc31049409e1a9f9108d73d51.html

        instno = self._config['refsys']['hdb']['instno']
        hostname = self._config['refsys']['hdb']['host']['name']
        sapmnt = self._config['refsys']['hdb']['base']['shared']

        locationlist = [
            f"/usr/sap/{sidU}/HDB{instno}/exe/config",
            f"{sapmnt}/{sidU}/SYS/global/hdb/custom/config",
            f"/usr/sap/{sidU}/SYS/global/hdb/custom/config",
            f"{sapmnt}/{sidU}/SYS/global/hdb/custom/config/DB_{sidU}",
            f"/usr/sap/{sidU}/SYS/global/hdb/custom/config/DB_{sidU}",
            f"/usr/sap/{sidU}/HDB{instno}/{hostname}"
        ]

        for location in locationlist:
            basepath = f"basepath_{baseType}volumes"
            cmd = f'grep "{basepath}[= ]" {location}/global.ini'
            result = self._cmdSshHdb.run(cmd)
            if result.rc == 0:
                # Example for result.out
                # basepath_datavolumes = /sapmnt/hana/data/HD1
                # We need the basepath, which means:
                # /sapmnt/hana

                # Get first the complete path itself
                tempPath = result.out.split('=')[1].strip()

                # Then the basepath itself
                path = '/'.join(tempPath.split('/')[:-2])

        # It might happen that the path contains a SAP profile/environment variable
        # such as $(DIR_GLOBAL). This must be replaced.
        # The parameter value can be got from call sappfpar
        path = self._replaceSAPPfpar(sidU, path)
        if not path:
            raise _DiscoveryError(
                f"Could not get the base path for {basepath} from one of the global.ini files"
            )
        return path

    def _replaceSAPPfpar(self, sidU, path):
        if "$(" not in path:
            return path

        subDirs = path.split('/')
        for subDir in subDirs:
            if "$(" in subDir:
                sappfpar = subDir
                value = getSAPPfparValue(self._cmdSshHdb, sidU, "hdb",
                                         sappfpar)
                if not value:
                    raise _DiscoveryError(
                        f"Could not get value for {sappfpar} from sappfpar call"
                    )
                subDirs[subDirs.index(sappfpar)] = value
                break
        return '/'.join(subDirs)
# Esempio n. 5
class AuthorizedKeys():
    """ Representation of an authorized_keys file of a specific user at a specific host """
    def __init__(self, ctx, hname, user):
        # Identification used in messages and as the rsync target prefix
        self._userFull = f'{user.name}@{hname}'
        self._authKeys = None

        # Set up local shell and remote ssh command execution
        self._cmdShell = CmdShell()
        self._cmdSsh = CmdSsh(ctx, hname, user)
        (self._rsyncSsh,
         self._rsyncSshSecrets) = self._cmdSsh.getSshCmdAndSecrets(withLogin=False)

        # Derive the path of the remote authorized_keys file from the
        # remote user's home directory
        homeDir = getHomeDir(ctx, hname, user)
        if not homeDir:
            fail(
                f"Could not determine the home directory of '{self._userFull}'"
            )
        self._akPath = f'{homeDir}/.ssh/authorized_keys'

        # Read the current contents of the remote authorized_keys file
        keyInfo = _PublicKeyInformation(hname, user, self._akPath)
        self._read(keyInfo)  # Sets self._authKeys

        logging.debug(
            f'self._authKeys >>>\n{self._authKeys}\n<<< self._authKeys')

    def __str__(self):
        return str(self._authKeys)

    def _read(self, info):
        """ Read the contents of the authorized_keys file """
        result = self._cmdSsh.run(f'cat {self._akPath}')
        if result.rc != 0:
            fail(
                f"Could not get the authorized keys '{self._akPath}' file of '{self._userFull}'"
            )
        self._authKeys = _PublicKeys(result.out, info, keepAll=True)

    def write(self):
        """ Write the contents of the authorized_keys file

        The contents are written to a temporary local file which is
        transferred to the remote user's .ssh directory using rsync;
        a backup of the original authorized_keys file is kept on the
        remote side.
        """
        with tempfile.NamedTemporaryFile(mode='w') as tmpFh:
            print(self._authKeys, file=tmpFh, flush=True)

            source = tmpFh.name
            target = self._akPath
            backupSuffix = '.bak'

            rsyncCmd = (f'rsync -av -e "{self._rsyncSsh}"'
                        f' --backup --suffix "{backupSuffix}"'
                        f' "{source}" "{self._userFull}:{target}"')

            result = self._cmdShell.run(rsyncCmd, self._rsyncSshSecrets)
            if result.rc != 0:
                fail(f"Could not write the authorized keys file '{target}'"
                     f" of '{self._userFull}\n({result.err})")

    def add(self, keys):
        """ Add keys in a key list to the internal authorized_keys list """
        return self._authKeys.addKeys(keys)

    def remove(self, keys):
        """ Remove keys in a key list from internal authorized_keys list """
        return self._authKeys.removeKeys(keys)

    def numKeys(self):
        """ Return the number of keys in the internal authorized_keys key list """
        return self._authKeys.numKeys()
# Esempio n. 6
    def create(ctx, overlayUuid):
        """ Create a new overlay filesystem share on the NFS server

        Parameters:

        ctx:         context providing NFS configuration (ctx.cf) and
                     credentials (ctx.cr)
        overlayUuid: uuid identifying the overlay filesystems to be created

        Returns the Overlay instance found for overlayUuid after creation.

        NOTE(review): this fragment duplicates Overlay.create() and is
        presumably meant to be a @staticmethod of a class - confirm.
        """

        cmdSsh = CmdSsh(ctx, ctx.cf.nfs.host.name, ctx.cr.nfs.user)

        # Making an overlay-fs NFS-mountable requires additional mount
        # options when establishing the overlay-fs; see also:
        #
        #   https://serverfault.com/questions/949892/nfs-export-an-overlay-of-ext4-and-btrfs
        #   https://www.kernel.org/doc/Documentation/filesystems/overlayfs.txt
        #
        # XXX NEEDED OPTIONS MAY DEPEND ON FILESYSTEM TYPE # pylint: disable=W0511
        #     OF lower, work AND upper -
        #     THIS MAY VARY FROM CUSTOMER TO CUSTOMER

        # NFS specific mount options for each overlay file system

        nfsOpts = []
        # nfsOpts.append('comment=merge')
        nfsOpts.append('nfs_export=on')
        nfsOpts.append('index=on')
        # nfsOpts.append('redirect_dir=nofollow')
        # nfsOpts.append('xino=on')

        # Create the directory structure for each overlay file system,
        # establish the overlay fs and add a corresponding entry to /etc/exports

        # Generic export options shared by all exported file systems
        exportOptsGeneric = ''
        exportOptsGeneric += 'rw'
        exportOptsGeneric += ',insecure'
        exportOptsGeneric += ',no_root_squash'
        exportOptsGeneric += ',sync'

        for subDir in getHdbSubDirs(ctx):
            ovld = getOverlayDirs(ctx, subDir.path, overlayUuid)
            cmdSsh.run(
                f'mkdir -p "{ovld.upper}" "{ovld.work}" "{ovld.merged}"')

            # Add to /etc/fstab for automatic mount after reboot
            # use noauto,x-systemd.automount to mount via systemd and automount

            fstabOpts = f'noauto,x-systemd.automount,{",".join(nfsOpts)},'
            fstabOpts += f'lowerdir={ovld.lower},upperdir={ovld.upper},workdir={ovld.work}'
            cmdSsh.run(
                f'echo "overlay {ovld.merged} overlay {fstabOpts} 0 0" >> /etc/fstab'
            )

            mountCmd = f'mount {ovld.merged}'

            cmdSsh.run(mountCmd)

            # Need to make the file systems unique - otherwise rpc.mountd
            # will always offer the first mounted file system.

            exportOpts = exportOptsGeneric + f',fsid={uuid.uuid1()}'

            cmdSsh.run(f'echo "{ovld.merged} *({exportOpts})" >> /etc/exports')

        # Create the persistence directories
        # (one per SID, owned by the respective sidadm user)

        persistenceDir = getPersistenceDir(ctx, overlayUuid)

        persistenceDirNws4 = f'{persistenceDir}/{ctx.cf.refsys.nws4.sidU}'

        cmdSsh.run(f'mkdir -p "{persistenceDirNws4}"')
        cmdSsh.run(
            f'chown {ctx.cf.refsys.nws4.sidadm.uid}:{ctx.cf.refsys.nws4.sidadm.gid}'
            f' "{persistenceDirNws4}"')
        cmdSsh.run(f'chmod 755 "{persistenceDirNws4}"')

        persistenceDirHdb = f'{persistenceDir}/{ctx.cf.refsys.hdb.sidU}'

        cmdSsh.run(f'mkdir -p "{persistenceDirHdb}"')
        cmdSsh.run(
            f'chown {ctx.cf.refsys.hdb.sidadm.uid}:{ctx.cf.refsys.hdb.sidadm.gid}'
            f' "{persistenceDirHdb}"')
        cmdSsh.run(f'chmod 755 "{persistenceDirHdb}"')

        cmdSsh.run(
            f'echo "{persistenceDir} *({exportOptsGeneric})" >> /etc/exports')

        # Export the overlay and persistence file systems

        cmdSsh.run('exportfs -ar')

        # Return the uuid of the created file systems

        return Overlays(ctx).find(overlayUuid)
# Esempio n. 7
class Overlay():
    """ Representation of an overlay filesystem share """
    @staticmethod
    def create(ctx, overlayUuid):
        """ Create a new overlay filesystem share on the NFS server

        Parameters:

        ctx:         context providing NFS configuration (ctx.cf) and
                     credentials (ctx.cr)
        overlayUuid: uuid identifying the overlay filesystems to be created

        Returns the Overlay instance found for overlayUuid after creation.
        """

        cmdSsh = CmdSsh(ctx, ctx.cf.nfs.host.name, ctx.cr.nfs.user)

        # Making an overlay-fs NFS-mountable requires additional mount
        # options when establishing the overlay-fs; see also:
        #
        #   https://serverfault.com/questions/949892/nfs-export-an-overlay-of-ext4-and-btrfs
        #   https://www.kernel.org/doc/Documentation/filesystems/overlayfs.txt
        #
        # XXX NEEDED OPTIONS MAY DEPEND ON FILESYSTEM TYPE # pylint: disable=W0511
        #     OF lower, work AND upper -
        #     THIS MAY VARY FROM CUSTOMER TO CUSTOMER

        # NFS specific mount options for each overlay file system

        nfsOpts = []
        # nfsOpts.append('comment=merge')
        nfsOpts.append('nfs_export=on')
        nfsOpts.append('index=on')
        # nfsOpts.append('redirect_dir=nofollow')
        # nfsOpts.append('xino=on')

        # Create the directory structure for each overlay file system,
        # establish the overlay fs and add a corresponding entry to /etc/exports

        # Generic export options shared by all exported file systems
        exportOptsGeneric = ''
        exportOptsGeneric += 'rw'
        exportOptsGeneric += ',insecure'
        exportOptsGeneric += ',no_root_squash'
        exportOptsGeneric += ',sync'

        for subDir in getHdbSubDirs(ctx):
            ovld = getOverlayDirs(ctx, subDir.path, overlayUuid)
            cmdSsh.run(
                f'mkdir -p "{ovld.upper}" "{ovld.work}" "{ovld.merged}"')

            # Add to /etc/fstab for automatic mount after reboot
            # use noauto,x-systemd.automount to mount via systemd and automount

            fstabOpts = f'noauto,x-systemd.automount,{",".join(nfsOpts)},'
            fstabOpts += f'lowerdir={ovld.lower},upperdir={ovld.upper},workdir={ovld.work}'
            cmdSsh.run(
                f'echo "overlay {ovld.merged} overlay {fstabOpts} 0 0" >> /etc/fstab'
            )

            mountCmd = f'mount {ovld.merged}'

            cmdSsh.run(mountCmd)

            # Need to make the file systems unique - otherwise rpc.mountd
            # will always offer the first mounted file system.

            exportOpts = exportOptsGeneric + f',fsid={uuid.uuid1()}'

            cmdSsh.run(f'echo "{ovld.merged} *({exportOpts})" >> /etc/exports')

        # Create the persistence directories
        # (one per SID, owned by the respective sidadm user)

        persistenceDir = getPersistenceDir(ctx, overlayUuid)

        persistenceDirNws4 = f'{persistenceDir}/{ctx.cf.refsys.nws4.sidU}'

        cmdSsh.run(f'mkdir -p "{persistenceDirNws4}"')
        cmdSsh.run(
            f'chown {ctx.cf.refsys.nws4.sidadm.uid}:{ctx.cf.refsys.nws4.sidadm.gid}'
            f' "{persistenceDirNws4}"')
        cmdSsh.run(f'chmod 755 "{persistenceDirNws4}"')

        persistenceDirHdb = f'{persistenceDir}/{ctx.cf.refsys.hdb.sidU}'

        cmdSsh.run(f'mkdir -p "{persistenceDirHdb}"')
        cmdSsh.run(
            f'chown {ctx.cf.refsys.hdb.sidadm.uid}:{ctx.cf.refsys.hdb.sidadm.gid}'
            f' "{persistenceDirHdb}"')
        cmdSsh.run(f'chmod 755 "{persistenceDirHdb}"')

        cmdSsh.run(
            f'echo "{persistenceDir} *({exportOptsGeneric})" >> /etc/exports')

        # Export the overlay and persistence file systems

        cmdSsh.run('exportfs -ar')

        # Return the uuid of the created file systems

        return Overlays(ctx).find(overlayUuid)

    # Instance methods

    # pylint: disable=too-many-arguments

    def __init__(self, ctx, overlayUuid, creationDate, creationTime):
        """ Create an internal data structure representing an
            existing overlay filesystem share on the NFS server """

        self._ctx = ctx

        # Public attributes describing the overlay share
        self.uuid = overlayUuid
        self.date = creationDate
        self.time = creationTime

        self._cmdSsh = CmdSsh(ctx, ctx.cf.nfs.host.name, ctx.cr.nfs.user)

    def __str__(self):
        # return f"{self.uuid} ({self.date} {self.time})"
        return f"{self.uuid} {self.date} {self.time}"

    def delete(self):
        """ Delete an overlay filesystem share on the NFS server """

        # Remove the entries for the overlay and persistence file systems from /etc/exports

        self._cmdSsh.run(f'sed -i.backup -e "/.*{self.uuid}.*/d" /etc/exports')

        # Remove the entries for the overlay and persistence file systems from /etc/fstab

        self._cmdSsh.run(
            f'sed -i.backup -e "/overlay .*{self.uuid}.*/d" /etc/fstab')

        # Remove the overlay file systems from the table of exported NFS file systems

        self._cmdSsh.run('exportfs -ar')

        # Tear down all overlay file systems

        for subDir in getHdbSubDirs(self._ctx):
            ovld = getOverlayDirs(self._ctx, subDir.path, self.uuid)
            self._cmdSsh.run(f'umount {ovld.merged}')
            self._cmdSsh.run(
                f'rm -rf {ovld.base}/{subDir.path}*/* 2>/dev/null')
            self._cmdSsh.run(
                f'rmdir -p {ovld.base}/{subDir.path}* 2>/dev/null')

        # Tear down the persistence file system

        persistenceDir = getPersistenceDir(self._ctx, self.uuid)

        self._cmdSsh.run(f'rm -rf {persistenceDir}/* 2>/dev/null')
        self._cmdSsh.run(f'rmdir -p {persistenceDir} 2>/dev/null')
class RemoteCopy():
    """ Perform selective remote copy with correct symlink preservation """

    def __init__(self, ctx, host, user, filterFilePath='/dev/null'):
        """Perform selective remote copy with correct symlink preservation

        Parameters:

        host:           host of source directory; also ssh and rsync host
        user:           ssh and rsync user
        filterFilePath: optional; path to rsync filter file

        """

        self._host = host
        self._user = user
        self._filterFilePath = filterFilePath

        # Initialize ssh connection

        self._cmdSsh = CmdSsh(ctx, host, user)
        self._rsyncSsh, self._rsyncSshSecrets = self._cmdSsh.getSshCmdAndSecrets(withLogin=False)

    def _runRsync(self, source, filterFilePath, verbose, dryRun):
        """ Run rsync for a single source path or a list of source paths

        source may be a single absolute path (copied with --relative into
        the current directory) or a list of paths (transferred in one rsync
        call via --files-from).
        """
        logging.debug(f'source: >>>{source}<<<')
        cmdShell = CmdShell()
        cmd = 'rsync -a --relative'
        cmd += f' -e "{self._rsyncSsh}"'
        if verbose > 0:
            cmd += ' -'+'v'*verbose
        cmd += f' -f "merge {filterFilePath}"'
        if dryRun:
            cmd += ' -n'
        if not isinstance(source, list):
            cmd += f' {self._user.name}@{self._host}:{source} ./'
            cmdShell.run(cmd, self._rsyncSshSecrets)
        else:
            # Transfer all paths in one rsync call; the paths are read by
            # rsync from a temporary local file (--files-from)
            with tempfile.NamedTemporaryFile(mode='w') as tfh:
                tfh.write("\n".join(str(fn) for fn in source))
                tfh.flush()
                logging.debug(f"Contents of file '{tfh.name}':")
                logging.debug('>>>')
                # pylint: disable=unspecified-encoding
                with open(tfh.name) as rfh:
                    logging.debug(rfh.read())
                logging.debug('<<<')
                cmd += f' -r --files-from={tfh.name}'
                cmd += f' {self._user.name}@{self._host}:/ ./'
                cmdShell.run(cmd, self._rsyncSshSecrets)

    def _getRealPathAndSymlinks(self, path, targets):
        """ Resolve a remote path to its real path, recording symlinks

        Walks 'path' component by component on the remote host; every
        symlink encountered is resolved (recursively) and recorded in
        'targets' (mapping: path -> absolute target, or None if the path
        is not a symlink). Returns the tuple (real path, updated targets).
        """
        logging.debug(f"path '{path}', targets '{targets}'")
        curPath = ''
        for component in list(filter(lambda x: x != '', path.split('/'))):
            curDir = curPath
            curPath = curDir + '/' + component
            logging.debug(f"Current path '{curPath}'")
            if curPath not in targets.keys():
                logging.debug(f"Visiting new path '{curPath}'")
                target = self._cmdSsh.run(f'readlink {curPath}').out
                if not target:
                    targets[curPath] = None
                else:
                    logging.debug(f"found symlink '{curPath}', target '{target}'")
                    if not target.startswith('/'):
                        relTarget = target
                        target = os.path.normpath(curDir + '/' + target)
                        logging.debug(f"Converted relative target '{relTarget}'"
                                      f" to absolute target '{target}'")
                    (targets[curPath], targets) = self._getRealPathAndSymlinks(target, targets)
            if targets[curPath]:
                curPath = targets[curPath]
        logging.debug(f"returning path '{curPath}', targets >>>{targets}<<<")
        return (curPath, targets)

    def _symlinkConvertRelToAbs(self, symlink, linkTarget):
        """ Convert a relative symlink target to an absolute one

        'symlink' is a local path starting with '.'; the returned target
        is absolute with respect to the root of the copied tree.
        """
        if not linkTarget.startswith('/'):
            relTarget = linkTarget
            linkTarget = os.path.join(os.path.dirname(symlink), linkTarget)[1:]  # skip leading '.'
            logging.debug(f"Converted relative target '{relTarget}' "
                          f"to absolute target '{linkTarget}'")
        return linkTarget

    def copy(self, source, filterFilePath=None, verbose=1, dryRun=False):
        """ Perform remote copy

        Parameters:

        source        : root of source directorytree; must be an absolute path
        filterFilePath: optional: path to file containing rsync filters;
                        if not supplied filter file path supplied to constructor will be used
        verbose       : optional: set rsync verbose level;
                        choices: [0, 1, 2, 3], corresponding to rsync verbose levels
                        [<none>, '-v', '-vv', '-vvv']
        dryRun        : optional: if set to True, perform a trial rsync run with no changes made'

        """

        if not filterFilePath:
            filterFilePath = self._filterFilePath

        logging.info(f"Remote copy of '{source}' started.")

        logging.debug(f'source         >>>{source        }<<<')
        logging.debug(f'filterFilePath >>>{filterFilePath}<<<')
        logging.debug(f'verbose        >>>{verbose       }<<<')
        logging.debug(f'dryRun         >>>{dryRun        }<<<')

        cmdShell = CmdShell()

        symlinksVisited = []
        existingSymlinks = cmdShell.run('find ./ -type l').out
        if existingSymlinks:
            # Do not follow local existing symlinks
            symlinksVisited = existingSymlinks.strip().split('\n')
        logging.debug(f'symlinksVisited >>>{symlinksVisited}<<<')
        # rsync root of source directory supplied on command line
        (realPath, targets) = self._getRealPathAndSymlinks(source, {})
        self._runRsync(realPath, filterFilePath, verbose, dryRun)
        # If root of source directory tree is a symlink itself append it to list of visited links
        logging.debug(f"source  : '{source}'")
        logging.debug(f"realPath: '{realPath}'")
        if realPath != source:
            # cmdShell.run(f'ln -s {realPath} .{source}')
            symlinksVisited.append(f'.{source}')
        # Recursively detect all symlinks and rsync their targets
        finished = False
        while not finished:
            finished = True
            symlinksFound = cmdShell.run('find ./ -type l').out
            logging.debug(f'symlinksFound >>>{symlinksFound}<<<')
            logging.debug(f'symlinksVisited >>>{symlinksVisited}<<<')
            if symlinksFound:
                symlinksFound = symlinksFound.strip().split('\n')
                realPaths = []
                for symlink in symlinksFound:
                    if (symlink not in symlinksVisited
                            and symlink[1:] not in targets.keys()):  # skip leading '.'
                        logging.debug(f'symlink >>>{symlink}<<<')
                        linkTarget = os.readlink(symlink)
                        logging.debug(f'linkTarget >>>{linkTarget}<<<')
                        linkTarget = self._symlinkConvertRelToAbs(symlink, linkTarget)
                        (realPath, targets) = self._getRealPathAndSymlinks(linkTarget, targets)
                        if realPath not in realPaths:
                            realPaths.append(realPath)
                            logging.debug(f'realPaths: >>>{realPaths}<<<')
                        symlinksVisited.append(symlink)
                        finished = False
                if realPaths:
                    self._runRsync(realPaths, filterFilePath, verbose, dryRun)
        # Copy all symlinks that were not yet copied
        logging.debug('Final rsync call')
        self._runRsync([s for (s, t) in targets.items() if t],
                       filterFilePath, verbose, dryRun)
        logging.info(f"Remote copy of '{source}' finished.")
# Esempio n. 9
class Verify():
    """ Verify various configuration settings """

    def __init__(self, ctx):
        """ Prepare one ssh connection per verified host plus an OCP session.

        ctx -- tool context carrying configuration (ctx.cf) and
               credentials (ctx.cr).
        """
        # Dispatch table: CLI sub-function name -> verification method
        # (consumed by verify()).
        self._functions = {
            "verify_ocp": self._verifyOcp,
            "verify_nfs": self._verifyNfs,
            "verify_nws4": self._verifyNws4,
            "verify_hdb": self._verifyHdb,
        }

        self._ctx = ctx

        # Separate connections for the NFS server and both reference-system
        # components; check=False / reuseCon=False presumably defer login
        # validation and force fresh connections per probe — TODO confirm
        # against CmdSsh.
        self._cmdSshNfs  = CmdSsh(ctx, ctx.cf.nfs.host.name, ctx.cr.nfs.user,
                                  check=False, reuseCon=False)
        self._cmdSshNws4 = CmdSsh(ctx, ctx.cf.refsys.nws4.host.name, ctx.cr.refsys.nws4.sidadm,
                                  check=False, reuseCon=False)
        self._cmdSshHdb  = CmdSsh(ctx, ctx.cf.refsys.hdb.host.name, ctx.cr.refsys.hdb.sidadm,
                                  check=False, reuseCon=False)
        # NOTE(review): the login value '******' looks redacted in this
        # source — confirm the intended argument.
        self._ocp        = Ocp(self._ctx, login="******", verify=True)

# Public methods

    def verify(self):
        """ Verify various configuration settings """

        sshOk, sshMsg = self._checkSsh()
        if not sshOk:
            fail(sshMsg)
        showMsgOk("SSH key setup is valid.")

        # A specific sub-function was requested on the command line:
        # run only that single check.
        handler = self._functions.get(self._ctx.ar.function)
        if handler:
            return handler()

        # Full suite: keep going after a failure so that all problems are
        # reported in one pass.
        checks = (
            self._verifyOcp,
            self._verifyImages,
            self._verifyNws4,
            self._verifyHdb,
            self._verifyNfs,
            self._verifySapSystem,
        )
        overall = True
        for check in checks:
            overall = check() and overall

        return overall

    # Private methods
    def _checkSsh(self):
        success = True
        msg = ''
        res = self._cmdSshNfs.run('true')
        if res.rc != 0:
            msg += self._cmdSshNfs.formatSshError(res,
                                                  self._ctx.cf.nfs.host.name,
                                                  self._ctx.cr.nfs.user)
            success = False

        res = self._cmdSshNws4.run('true')
        if res.rc != 0:
            msg += self._cmdSshNws4.formatSshError(res,
                                                   self._ctx.cf.refsys.nws4.host.name,
                                                   self._ctx.cr.refsys.nws4.sidadm)
            success = False

        res = self._cmdSshHdb.run('true')
        if res.rc != 0:
            msg += self._cmdSshHdb.formatSshError(res,
                                                  self._ctx.cf.refsys.hdb.host.name,
                                                  self._ctx.cr.refsys.hdb.sidadm)
            success = False

        return success, msg

    def _verifyOcp(self):
        """ Verify OCP settings """
        # pylint: disable=too-many-statements

        ctx  = self._ctx
        ocp  = ctx.cf.ocp
        user = ctx.cr.ocp.user

        def hdbSecretMatchesCreds():
            # Compare the user stored in the OCP secret with the
            # credentials file.
            inSecret = self._ocp.getHdbConnectSecretUser()
            inCreds  = ctx.cr.refsys.nws4.hdbconnect
            return (inSecret.name == inCreds.name
                    and inSecret.password == inCreds.password)

        def checkSetup():
            # Domain -> credentials -> project; stop at the first failure.
            if not self._ocp.isDomainValid():
                showMsgErr(f"OCP domain name '{ocp.domain}' is invalid.")
                return False
            showMsgOk("OCP domain name is valid.")
            if not self._ocp.isCredentialsValid():
                showMsgErr(f"OCP user '{user.name}' and/or password are invalid.")
                return False
            showMsgOk("OCP user and password are valid.")
            if not self._ocp.isProjectValid():
                showMsgErr(f"OCP project '{ocp.project}' does not exist.")
                return False
            showMsgOk("OCP project is valid.")
            return True

        def checkResources():
            # Memory request must not exceed the limit for any container
            # flavor except 'init'.
            ok = True
            for containerType in ctx.config.getContainerFlavors():
                if containerType == 'init':
                    continue
                if areContainerMemResourcesValid(ocp, containerType):
                    showMsgOk("OCP memory resources for container type "
                              f"'{containerType}' are valid.")
                else:
                    showMsgErr(f"OCP memory limit for container type '{containerType}' "
                               f"is less than the value specified for requested memory.")
                    ok = False
            return ok

        def checkSecret():
            # Only distributed reference systems require an HDB connect secret.
            if refSystemIsStandard(ctx):
                return True
            secret = ocp.containers.di.secret
            if not secret:
                showMsgErr("Reference system is a distributed system.")
                showMsgInd("You must specify the name of an OCP secret in the config.yaml file")
                showMsgInd("containing the information about the "
                           "SAP HANA DB user and password.")
                return False
            if secret not in self._ocp.getSecret():
                showMsgErr(f"Specified OCP secret '{secret}' "
                           "was not found in OCP cluster.")
                showMsgInd("Make sure the secret exists and is "
                           "created in the right project.")
                return False
            if not hdbSecretMatchesCreds():
                showMsgErr(f"Mismatch between generated secret '{secret}' "
                           "and values specified in your credentials file.")
                showMsgInd("Re-generate your secret by executing the tool "
                           "'tools/ocp-hdb-secret-gen'")
                return False
            showMsgOk(f"OCP secret '{secret}' exists and is valid.")
            return True

        # Later stages are skipped once an earlier stage failed.
        success = checkSetup()
        success = success and checkResources()
        success = success and checkSecret()

        return success

    def _verifyImages(self):
        """ Verify settings for images.

        For every image flavor (except 'init') check that each configured
        package is installable, either via dnf or via a local RPM file in
        the default packages directory.
        """

        def _isRpmFileForPackageAvailable(packageName, path):
            # True if an RPM file for 'packageName' exists below 'path'.
            try:
                getRpmFileForPackage(packageName, path)
                return True
            except RpmFileNotFoundException as exp:
                print(exp.errorText)
                return False

        def _getImageTypes(ctx):
            # Image flavors are the attribute names of the images config node.
            return list(ctx.cf.images.__dict__)

        success = True

        defaultPackagesDir = self._ctx.cs.defaultPackagesDir

        for flavor in _getImageTypes(self._ctx):
            if flavor == "init":
                continue
            packages = getattr(self._ctx.cf.images, flavor).packages
            for package in packages:
                if package.dnfInstallable:
                    showMsgOk(f"Package {package.packageName} installable via dnf install.")
                elif _isRpmFileForPackageAvailable(package.packageName, defaultPackagesDir):
                    showMsgOk(f"Package {package.packageName} installable via rpm.")
                else:
                    # Bug fix: the second literal was missing its 'f' prefix,
                    # so '{defaultPackagesDir}' was printed verbatim.
                    showMsgErr(f"Package {package.packageName} not found "
                               f"in {defaultPackagesDir}.")
                    success = False
        return success

    def _verifyNfs(self):
        """ Verify NFS settings """
        nfs  = self._ctx.cf.nfs
        user = self._ctx.cr.nfs.user

        # Host must resolve before the user check makes sense.
        if not self._isHostNameValid(self._cmdSshNfs):
            showMsgErr(f"NFS host '{nfs.host.name}' is invalid.")
            return False
        showMsgOk("NFS host is valid.")

        if not self._isUserValid(self._cmdSshNfs):
            showMsgErr(f"NFS user '{user.name}' is invalid "
                       f"or ssh is not set up correctly.")
            showMsgInd(f"Check first the existence of '{user.name}' on '{nfs.host.name}'.")
            showMsgInd(f"If exists, check the ssh connection by executing: "
                       f"ssh {user.name}@{nfs.host.name}")
            return False
        showMsgOk("NFS user is valid.")

        return True

    def _verifyNws4(self):
        """ Verify settings for reference system component 'nws4'.

        Delegates to _verifyRefSys using the ssh connection created for the
        nws4 host / nws4 sidadm user.
        """
        return self._verifyRefSys('nws4', self._cmdSshNws4)

    def _verifyHdb(self):
        """ Verify settings for reference system component 'hdb' """
        success = self._verifyRefSys('hdb', self._cmdSshNws4)
        if success:
            for baseDir in ['shared', 'data', 'log']:
                basePath = getattr(self._ctx.cf.refsys.hdb.base, baseDir)
                if self._isHdbBaseDirValid(baseDir):
                    showMsgOk(f"HDB base directory '{basePath}' is valid for {baseDir}.")
                else:
                    showMsgErr(f"HDB base directory '{basePath}' is invalid.")
                    success = False

        return success

    def _verifyRefSys(self, component, cmdSsh):
        """ Verify settings for the given reference-system component.

        component -- 'nws4' or 'hdb' (key into cf.refsys / cr.refsys)
        cmdSsh    -- ssh connection for that component's host and sidadm user
        """
        compUp   = component.upper()
        compCf   = getattr(self._ctx.cf.refsys, component)
        sidU     = compCf.sidU
        hostname = compCf.host.name
        user     = getattr(self._ctx.cr.refsys, component).sidadm

        # Each stage depends on the previous one; bail out on first failure.
        if not self._isHostNameValid(cmdSsh):
            showMsgErr(f"{compUp} host '{hostname}' is invalid.")
            return False
        showMsgOk(f"{compUp} host is valid.")

        if not self._isUserValid(cmdSsh):
            showMsgErr(f"{compUp} user '{user.name}' is invalid "
                       f"or ssh is not set up correctly.")
            showMsgInd(f"Check first the existence of '{user.name}' on '{hostname}'.")
            showMsgInd(f"If exists, check the ssh connection by executing: "
                       f"ssh {user.name}@{hostname}")
            return False
        showMsgOk(f"{compUp} user is valid.")

        if not self._isSidInUsrSapServices(cmdSsh, sidU):
            showMsgErr(f"{compUp} SAP system ID is invalid.")
            return False
        showMsgOk(f"{compUp} SAP system ID is valid.")

        return True

    def _verifySapSystem(self):
        """ Verify SAP system setup """
        refsys  = self._ctx.cf.refsys
        success = True

        # A standard (non-distributed) system requires DB and application
        # server on the same host.
        if refSystemIsStandard(self._ctx) and refsys.nws4.host.name != refsys.hdb.host.name:
            showMsgErr(f"The HANADB database '{refsys.hdb.sidU}' "
                       "must run on the same host as the NWS4 SAP System.")
            success = False

        # DEFAULT.PFL of the NWS4 system must reference the configured HDB SID.
        if not self._isHdbSidInDefaultPfl():
            showMsgErr("You must not use a different HANADB SAP System "
                       f"than specified for the NWS4 SAP System '{refsys.nws4.sidU}'.")
            success = False

        return success

    def _isHostNameValid(self, cmdSsh):
        out = self._checkSshLogin(cmdSsh)
        return 'Could not resolve hostname' not in out

    def _isUserValid(self, cmdSsh):
        out = self._checkSshLogin(cmdSsh)
        return 'Permission denied' not in out and 'Connection reset' not in out

    def _checkSshLogin(self, cmdSsh):
        return cmdSsh.run('true').err

    def _isSidInUsrSapServices(self, cmdSsh, sidU):
        out = cmdSsh.run(f' grep {sidU} /usr/sap/sapservices | wc -l').err
        return not out.startswith('0')

    def _isDirValid(self, cmdSsh, directory):
        out = cmdSsh.run(f' ls {directory}').err
        return 'No such file or directory' not in out

    def _isHdbBaseDirValid(self, base):
        basePath = getattr(self._ctx.cf.refsys.hdb.base, base)
        out = self._cmdSshHdb.run(f' ls {basePath}').out
        return base in out

    def _isHdbSidInDefaultPfl(self):
        defaultPfl = f'/usr/sap/{self._ctx.cf.refsys.nws4.sidU}/SYS/profile/DEFAULT.PFL'
        out = self._cmdSshNws4.run(f' grep dbs/hdb/dbname {defaultPfl}').out
        return self._ctx.cf.refsys.hdb.sidU in out