def _runRsync(self, source, filterFilePath, verbose, dryRun):
    logging.debug(f'source: >>>{source}<<<')
    cmdShell = CmdShell()
    cmd = 'rsync -a --relative'
    cmd += f' -e "{self._rsyncSsh}"'
    if verbose > 0:
        cmd += ' -'+'v'*verbose
    cmd += f' -f "merge {filterFilePath}"'
    if dryRun:
        cmd += ' -n'
    if not isinstance(source, list):
        cmd += f' {self._user.name}@{self._host}:{source} ./'
        cmdShell.run(cmd, self._rsyncSshSecrets)
    else:
        with tempfile.NamedTemporaryFile(mode='w') as tfh:
            tfh.write("\n".join(str(fn) for fn in source))
            tfh.flush()
            logging.debug(f"Contents of file '{tfh.name}':")
            logging.debug('>>>')
            # pylint: disable=unspecified-encoding
            with open(tfh.name) as rfh:
                logging.debug(rfh.read())
            logging.debug('<<<')
            cmd += f' -r --files-from={tfh.name}'
            cmd += f' {self._user.name}@{self._host}:/ ./'
            cmdShell.run(cmd, self._rsyncSshSecrets)
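For reference, a minimal standalone sketch of the two rsync command shapes assembled above (single source path versus a list passed via --files-from); the ssh wrapper string, user@host and paths are placeholders and nothing is executed.

import tempfile

rsyncSsh = 'ssh -i /tmp/example_id'       # placeholder for self._rsyncSsh
remote = 'builder@build.example.com'      # placeholder for user@host
base = f'rsync -a --relative -e "{rsyncSsh}" -f "merge /tmp/rsync-filter"'

# Single source path: '--relative' recreates the full remote path below './'.
print(f'{base} {remote}:/usr/sap ./')

# List of source paths: written to a temporary file and passed via '--files-from',
# with the remote root '/' as the transfer source.
sources = ['/usr/sap/ABC', '/sapmnt/ABC']
with tempfile.NamedTemporaryFile(mode='w') as tfh:
    tfh.write('\n'.join(sources))
    tfh.flush()
    print(f'{base} -r --files-from={tfh.name} {remote}:/ ./')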
def getValidNfsServerAddress(ctx):
    """ Get valid NFS server address serving the OCP cluster net """

    # Get all IP addresses for NFS server
    allIps = getAllNfsServerIpAddresses(ctx)

    # Need an OC login to get worker node address
    ocp = Ocp(ctx, login="******", verify=True)
    worker = ocp.getWorkerNodeList()[0]

    # Run Python script tools/modules/nfs-ping-test on helper node (in-line)
    cmdShell = CmdShell()
    host = ctx.cf.ocp.helper.host.name
    user = ctx.cr.ocp.helper.user
    repo = ctx.cf.build.repo.root
    toolCmd = f'python3 - <{repo}/tools/modules/nfs-ping-test {worker} {allIps}'

    cmdSsh = CmdSsh(ctx, host, user, reuseCon=False)
    runCmd, secr = cmdSsh.getSshCmdAndSecrets(withLogin=True)

    ipAddr = cmdShell.run(f'{runCmd} {toolCmd}', secrets=secr).out
    logging.debug(
        f"Running shell cmd: '{runCmd} {toolCmd}' returns '{ipAddr}'")
    if ipAddr == 'None':
        message = "Could not identify valid IP address for the NFS server"
        message += f"on worker node {worker}.\n"
        fail(message)

    del ocp
    return ipAddr
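The toolCmd above relies on the 'python3 - <script args' shell pattern: the script body is read from stdin while everything after '-' ends up in sys.argv. A minimal sketch with throwaway arguments:

import os
import subprocess
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as fh:
    fh.write('import sys; print(sys.argv[1:])')
    scriptPath = fh.name
try:
    # 'python3 - worker-0 10.0.0.1 <script.py' runs the stdin script with argv ['-', 'worker-0', '10.0.0.1']
    res = subprocess.run(f'python3 - worker-0 10.0.0.1 <{scriptPath}',
                         shell=True, capture_output=True, text=True, check=True)
    print(res.stdout.strip())   # ['worker-0', '10.0.0.1']
finally:
    os.remove(scriptPath)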
def getPublicKey(ctx, hname, user):
    """ Get the public key from a public key file of a specifc user at a specific host """

    userFull = f'{user.name}@{hname}'

    # Prepare command execution and determine path of public key file

    if hname == getBuildHost().name:
        cmd = CmdShell()
        defaultPubKeyPath = f'{Path.home()}/.ssh/id_rsa.pub'
    else:
        cmd = CmdSsh(ctx, hname, user)
        defaultPubKeyPath = f'{getHomeDir(ctx, hname, user)}/.ssh/id_rsa.pub'

    if user.sshid:
        pubKeyPath = f'{user.sshid}.pub'
    else:
        pubKeyPath = defaultPubKeyPath

    # Read public key file

    res = cmd.run(f'cat {pubKeyPath}')

    if res.rc != 0:
        fail(
            f"Could not get public key record of user '{userFull}' from file '{pubKeyPath}'"
        )

    # Get the public key from the public key file content

    info = _PublicKeyInformation(hname, user, pubKeyPath)

    pubKeys = _PublicKeys(res.out, info, keepAll=False)

    logging.debug(f'pubKeys >>>\n{pubKeys}\n<<< pubKeys')

    if pubKeys.numKeys() == 0:
        fail(f'Public key file of {userFull} does not contain any key record')

    if pubKeys.numKeys() > 1:
        records = '\n\n'.join(pubKeys)
        fail(f"Public key file '{pubKeyPath}' of {userFull}"
             f" contains multiple records:\n\n{records}")

    pubKey = pubKeys.getKey(0)

    logging.debug(f'Public key for {userFull}: >>>\n{pubKey}\n<<<')

    return pubKey
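The _PublicKeys and _PublicKeyInformation helpers are not part of this snippet; as an assumption-level illustration, a single OpenSSH public key record consists of a key type, base64 key data and an optional comment:

record = 'ssh-rsa AAAAB3NzaExampleOnlyNotARealKey root@buildhost'   # made-up record
keyType, keyData, comment = (record.split(maxsplit=2) + [''])[:3]
print(keyType)   # ssh-rsa
print(comment)   # root@buildhost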
class Builder():
    """ Build container images """

    # pylint: disable=too-many-instance-attributes

    def __init__(self, ctx):
        self._ctx = ctx
        self._host = None
        self._user = None
        self._cmdShell = CmdShell()
        self._cmdSsh = None
        self._remoteCopy = None
        self._flavor = None
        self._description = None

    def buildImage(self, sidU, host, user):
        """ Build image """

        # pylint: disable=too-many-locals,too-many-statements

        repoRoot = self._ctx.cf.build.repo.root
        buildTmpRoot = self._ctx.ar.temp_root
        buildDir = self._ctx.ar.build_directory
        keepFiles = self._ctx.ar.keep_files

        # Initialize ssh connection

        if not self._cmdSsh or host != self._host or user != self._user:
            # Initialize only if not yet initialized or if connection parameters have changed
            if self._cmdSsh:
                del self._cmdSsh
            self._cmdSsh = CmdSsh(self._ctx, host, user)

        # Initialize remote copy connection

        if not self._remoteCopy or host != self._host or user != self._user:
            # Initialize only if not yet initialized or if connection parameters have changed
            if self._remoteCopy:
                del self._remoteCopy
            self._remoteCopy = RemoteCopy(self._ctx, host, user)

        self._host = host
        self._user = user

        # System ID

        sidL = sidU.lower()

        logging.debug(f"sidU: '{sidU}'")
        logging.debug(f"sidL: '{sidL}'")

        # Directories

        dirs = types.SimpleNamespace()
        dirs.repoRoot = repoRoot
        if buildDir:
            dirs.build = buildDir
        else:
            self._cmdShell.run(f'mkdir -p "{buildTmpRoot}"')
            dirs.build = self._cmdShell.run(
                f'mktemp -d -p "{buildTmpRoot}" '
                f'-t soos-build-{self._flavor}.XXXXXXXXXX').out
        dirs.usrSapReal = self._getUsrSapReal()
        dirs.sapmnt = self._ctx.cf.refsys.nws4.base.sapmnt

        self._setDirsFlavor(sidU, dirs)

        logging.debug(f"dirs: '{dirs}'")

        # Image properties

        image = types.SimpleNamespace()
        image.name = f'localhost/soos-{sidL}'
        image.version = 'latest'
        image.tag = f'{image.name}:{image.version}'
        image.date = date.today().strftime('%Y-%m-%d')
        image.description = self._description  # Must be set by derived class
        with pushd(dirs.repoRoot):
            image.commit = self._cmdShell.run('git log --pretty="%H" -1').out
            image.branch = self._cmdShell.run(
                'git rev-parse --abbrev-ref HEAD').out

        logging.debug(f"image: '{image}'")

        # OS user properties

        (sapadm, sidadm, sapsysGid) = self._getOsUserProperties(sidL)

        logging.debug(f"sapadm   : '{sapadm}'")
        logging.debug(f"sidadm   : '{sidadm}'")
        logging.debug(f"sapsysGid: '{sapsysGid}'")

        # Misc

        buildCmd = 'podman'
        remoteOs = 'linux' + self._cmdSsh.run('uname -m').out
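        # e.g. 'linuxx86_64'; handed to the flavor-specific rsync filter via _genBuildContext()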

        # Start build process

        with tempfile.TemporaryDirectory() as dirs.tmp:
            logging.debug(f"Created temporary directory '{dirs.tmp}'")
            self._cleanupAtStart(dirs, keepFiles)
            self._genBuildContext(sidU, dirs, sapadm, sidadm, sapsysGid, host,
                                  remoteOs)
            containerfile = self._genContainerfile(sidU, dirs, image, sapadm,
                                                   sidadm, sapsysGid)
            self._buildImage(buildCmd, dirs, image, containerfile)
            self._cleanupAtEnd(dirs)

    def _getUsrSapReal(self):
        # Check whether /usr/sap is a real directory or a symlink to another directory
        usrSapReal = self._cmdSsh.run('readlink /usr/sap').out
        if len(usrSapReal) != 0:
            logging.info(
                f"Detected that '/usr/sap' is a symbolic link to '{usrSapReal}'"
            )
        else:
            usrSapReal = '/usr/sap'
        logging.debug(f"usrSapReal: '{usrSapReal}'")
        return usrSapReal

    def _setDirsFlavor(self, sidU, dirs):
        # Flavor specific directories
        # pylint: disable=unused-argument
        fail(
            'This function must be overridden by the derived flavor-specific builder class.'
        )

    def _getOsUserProperties(self, sidL):
        # Get properties of sapadm and <sid>adm from remote host /etc/passwd

        sapadm = types.SimpleNamespace()
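        # /etc/passwd record layout: name:password:UID:GID:comment:home:shell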
        (_d1, _d2, sapadm.uid, sapsysGid, sapadm.comment, sapadm.home,
         sapadm.shell
         ) = self._cmdSsh.run('grep "^sapadm:" /etc/passwd').out.split(':')

        sidadm = types.SimpleNamespace()
        (_d1, _d2, sidadm.uid, _d4, sidadm.comment, sidadm.home, sidadm.shell
         ) = self._cmdSsh.run(f'grep "^{sidL}adm:" /etc/passwd').out.split(':')

        logging.debug(f'Returning {sapadm}, {sidadm}, {sapsysGid}')

        return (sapadm, sidadm, sapsysGid)

    def _cleanupAtStart(self, dirs, keepFiles):
        # Remove previously copied files if not explicitly asked to keep them
        if not keepFiles:
            logging.info(
                f"##### Cleaning up build directoy '{dirs.build}' #####")
            with pushd(dirs.build):
                self._cmdShell.run('rm -rf ..?* .[!.]* *')

    def _genBuildContext(self, sidU, dirs, sapadm, sidadm, sapsysGid, host,
                         remoteOs):
        # Generate podman build context
        # pylint: disable=too-many-arguments
        filterFilePath = f'{dirs.tmp}/rsync-filter'
        logging.debug(f"filterFilePath: {filterFilePath}")
        try:
            # pylint: disable=invalid-name, unspecified-encoding
            with open(filterFilePath, 'w') as fh:
                print(self._getRsyncFilter(sidU, dirs, remoteOs), file=fh)
        except IOError:
            fail(f"Error writing to file {filterFilePath}")

        self._genBuildContextFlavor(sidU, dirs, sapadm, sidadm, sapsysGid,
                                    host, filterFilePath)

    def _getRsyncFilter(self, sidU, dirs, remoteOs):
        # Get filter for selective copy depending on flavor
        # pylint: disable=unused-argument
        fail(
            'This function must be overridden by the derived flavor-specific builder class.'
        )

    def _genBuildContextFlavor(self, sidU, dirs, sapadm, sidadm, sapsysGid,
                               host, filterFilePath):
        # Flavor dependent actions for build context generation
        # pylint: disable=unused-argument,too-many-arguments
        fail(
            'This function must be overridden by the derived flavor-specific builder class.'
        )

    def _genContainerfile(self, sidU, dirs, image, sapadm, sidadm, sapsysGid):
        # Generate containerfile from template depending on flavor
        # MUST RUN AFTER BUILD CONTEXT SETUP
        # pylint: disable=too-many-arguments
        logging.info("##### Generating Containerfile #####")

        sidL = sidU.lower()

        # Common parameters
        if dirs.usrSapReal != '/usr/sap':
            usrSapLinkCmd = f'ln -s {dirs.usrSapReal} /usr/sap'
        else:
            usrSapLinkCmd = 'true'

        # get optional packages
        packages = getattr(self._ctx.cf.images, self._flavor).packages
        pkgParams = self._getOptionalPackageParams(packages, dirs)

        params = {
            'IMAGE_BRANCH': image.branch,
            'IMAGE_COMMIT': image.commit,
            'IMAGE_DATE': image.date,
            'IMAGE_DESCRIPTION': image.description,
            'IMAGE_VERSION': image.version,
            'SAPADM_COMMENT': sapadm.comment,
            'SAPADM_HOME': sapadm.home,
            'SAPADM_SHELL': sapadm.shell,
            'SAPADM_UID': sapadm.uid,
            'SAPMNT': dirs.sapmnt,
            'SAPSYS_GID': sapsysGid,
            'sid': sidL,
            'SID': sidU,
            'SIDADM_COMMENT': sidadm.comment,
            'SIDADM_HOME': sidadm.home,
            'SIDADM_SHELL': sidadm.shell,
            'SIDADM_UID': sidadm.uid,
            'USR_SAP_REAL': dirs.usrSapReal,
            'USR_SAP_LINK_CMD': usrSapLinkCmd,
            'INSTALL_OPT_PACKAGES': pkgParams.installOptPackagesDnf,
            'COPY_OPT_PACKAGE_FILES': pkgParams.copyOptPackageFiles,
            'INSTALL_OPT_PACKAGE_FILES': pkgParams.installOptPackageFiles
        }

        params.update(self._getContainerfileParams(sidU, dirs))
        containerfile = f'{dirs.tmp}/containerfile'
        template = f'{dirs.repoRoot}/openshift/images/{self._flavor}/containerfile.template'
        genFileFromTemplate(template, containerfile, params)
        try:
            # pylint: disable=invalid-name, unspecified-encoding
            with open(containerfile) as fh:
                logging.debug(
                    f"Contents of '{containerfile}': >>>\n{fh.read()}<<<")
        except IOError:
            fail(f"Error reading from {containerfile}")
        return containerfile

    def _getContainerfileParams(self, sidU, dirs):
        # Non-common containerfile template parameters depending on flavor
        # pylint: disable=unused-argument
        fail(
            'This function must be overridden by the derived flavor-specific builder class.'
        )

    def _buildImage(self, buildCmd, dirs, image, containerfile):
        # Build image
        # MUST RUN AFTER BUILD CONTEXT SETUP
        # pylint: disable=no-self-use
        logging.info("##### Building image #####")
        with pushd(dirs.build):
            self._cmdShell.run(
                f'{buildCmd} build -t {image.tag} -f "{containerfile}" .')

    def _getOptionalPackageParams(self, packages, dirs):
        # Check if optional packages must be installed
        # and set them
        pkgParams = types.SimpleNamespace()
        pkgParams.installOptPackagesDnf = ''
        pkgParams.copyOptPackageFiles = ''
        pkgParams.installOptPackageFiles = ''

        if len(packages) > 0:
            self._addDependencies(packages, pkgParams)
            self._addDnfInstallablePackages(packages, pkgParams)
            self._addRpmPackages(packages, pkgParams, dirs)
        return pkgParams

    def _addDependencies(self, packages, pkgParams):
        # Set dependencies for optional packages
        firstRun = pkgParams.installOptPackagesDnf == ""

        for package in packages:
            if len(package.dependencies) > 0:
                if firstRun:
                    pkgParams.installOptPackagesDnf = 'RUN  dnf -y install'
                    firstRun = False
                else:
                    pkgParams.installOptPackagesDnf += ' && \\' + '\n'
                    pkgParams.installOptPackagesDnf += '     dnf -y install'

                for dependency in package.dependencies:
                    logging.debug(f"Adding dependency '{dependency}' " +
                                  f"for package '{package.packageName}'")
                    pkgParams.installOptPackagesDnf += f' {dependency}'

    def _addDnfInstallablePackages(self, packages, pkgParams):
        # set all packages to be installed using dnf
        firstRun = pkgParams.installOptPackagesDnf == ""
        for package in packages:
            if package.dnfInstallable:
                logging.debug(
                    f'package {package.packageName} installable via dnf install'
                )

                if firstRun:
                    pkgParams.installOptPackagesDnf = 'RUN  dnf -y install'
                    firstRun = False
                else:
                    pkgParams.installOptPackagesDnf += ' && \\' + '\n'
                    pkgParams.installOptPackagesDnf += '     dnf -y install'
                if package.repository != "":
                    pkgParams.installOptPackagesDnf += f' --enablerepo={package.repository}'
                    logging.debug(
                        f'enabling repository    : {package.repository}')
                pkgParams.installOptPackagesDnf += f' {package.packageName}'

    def _addRpmPackages(self, packages, pkgParams, dirs):
        # set all packages which must be copied and installed using rpm
        firstRun = pkgParams.copyOptPackageFiles == ""
        for package in packages:
            if not package.dnfInstallable:
                logging.debug(
                    f'package {package.packageName} must be installed via rpm')
                if firstRun:
                    pkgParams.copyOptPackageFiles = 'COPY '
                    pkgParams.installOptPackageFiles = 'RUN  '
                    firstRun = False
                else:
                    pkgParams.copyOptPackageFiles += ' && \\' + '\n' + '     '
                    pkgParams.installOptPackageFiles += ' && \\' + '\n' + '     '

                try:
                    rpmFileName = getRpmFileForPackage(package.packageName,
                                                       dirs.defaultPackagesDir)
                    pkgParams.copyOptPackageFiles += f'{dirs.defaultPackagesDir}'
                    pkgParams.copyOptPackageFiles += f'/{rpmFileName} / '
                    pkgParams.installOptPackageFiles += f'rpm -i /{rpmFileName} && \\' + '\n'
                    pkgParams.installOptPackageFiles += f'     rm /{rpmFileName}'
                except RpmFileNotFoundException as exp:
                    fail(exp.errorText)

    def _cleanupAtEnd(self, dirs):
        # Cleanup after image build
        with pushd(dirs.repoRoot):
            # self._cmdShell.run(f'\\rm -rf {dirs.build}')
            pass
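For reference, a minimal sketch of the dnf install fragment that _addDnfInstallablePackages() assembles for the Containerfile; the package names and repository below are made up.

import types

examplePackages = [
    types.SimpleNamespace(packageName='lsof', dnfInstallable=True,
                          repository='', dependencies=[]),
    types.SimpleNamespace(packageName='compat-foo', dnfInstallable=True,
                          repository='extra-repo', dependencies=[]),
]

installOptPackagesDnf = ''
for package in examplePackages:
    if installOptPackagesDnf == '':
        installOptPackagesDnf = 'RUN  dnf -y install'
    else:
        installOptPackagesDnf += ' && \\' + '\n' + '     dnf -y install'
    if package.repository != '':
        installOptPackagesDnf += f' --enablerepo={package.repository}'
    installOptPackagesDnf += f' {package.packageName}'

# Prints a single RUN instruction chaining one 'dnf -y install' per package:
#   RUN  dnf -y install lsof && \
#        dnf -y install --enablerepo=extra-repo compat-foo
print(installOptPackagesDnf)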
class AuthorizedKeys():
    """ Representation of an authorized_keys file of a specific user at a specific host """
    def __init__(self, ctx, hname, user):

        self._userFull = f'{user.name}@{hname}'
        self._authKeys = None

        # Prepare command execution

        self._cmdShell = CmdShell()
        self._cmdSsh = CmdSsh(ctx, hname, user)
        self._rsyncSsh, self._rsyncSshSecrets = self._cmdSsh.getSshCmdAndSecrets(
            withLogin=False)

        # Get the path to the remote authorized_keys file

        homeDir = getHomeDir(ctx, hname, user)

        if not homeDir:
            fail(
                f"Could not determine the home directory of '{self._userFull}'"
            )

        self._akPath = f'{homeDir}/.ssh/authorized_keys'

        # Read the authorized_keys file

        info = _PublicKeyInformation(hname, user, self._akPath)

        self._read(info)  # Sets self._authKeys

        logging.debug(
            f'self._authKeys >>>\n{self._authKeys}\n<<< self._authKeys')

    def __str__(self):
        return str(self._authKeys)

    def _read(self, info):
        """ Read the contents of the authorized_keys file """

        res = self._cmdSsh.run(f'cat {self._akPath}')

        if res.rc != 0:
            fail(
                f"Could not get the authorized keys '{self._akPath}' file of '{self._userFull}'"
            )

        self._authKeys = _PublicKeys(res.out, info, keepAll=True)

    def write(self):
        """ Write the contents of the authorized_keys file """

        # Write the contents to a temporary local file and transfer the file to
        # the remote user's .ssh directory using rsync
        # Keep a backup of the original authorized_keys file on the remote side

        with tempfile.NamedTemporaryFile(mode='w') as akFh:
            print(self._authKeys, file=akFh, flush=True)

            source = akFh.name
            target = self._akPath

            backupSuffix = '.bak'

            rsyncCmd = f'rsync -av -e "{self._rsyncSsh}"'
            rsyncCmd += f' --backup --suffix "{backupSuffix}"'
            rsyncCmd += f' "{source}" "{self._userFull}:{target}"'

            res = self._cmdShell.run(rsyncCmd, self._rsyncSshSecrets)

            if res.rc != 0:
                fail(f"Could not write the authorized keys file '{target}'"
                     f" of '{self._userFull}\n({res.err})")

    def add(self, keys):
        """ Add keys in a key list to the internal authorized_keys list """
        return self._authKeys.addKeys(keys)

    def remove(self, keys):
        """ Remove keys in a key list from internal authorized_keys list """
        return self._authKeys.removeKeys(keys)

    def numKeys(self):
        """ Return the number of keys in the internal authorized_keys key list """
        return self._authKeys.numKeys()

    def copy(self, source, filterFilePath=None, verbose=1, dryRun=False):
        """ Perform remote copy

        Parameters:

        source        : root of the source directory tree; must be an absolute path
        filterFilePath: optional: path to a file containing rsync filters;
                        if not supplied, the filter file path supplied to the constructor is used
        verbose       : optional: rsync verbosity level;
                        choices: [0, 1, 2, 3], corresponding to rsync verbose levels
                        [<none>, '-v', '-vv', '-vvv']
        dryRun        : optional: if set to True, perform a trial rsync run with no changes made

        """

        if not filterFilePath:
            filterFilePath = self._filterFilePath

        logging.info(f"Remote copy of '{source}' started.")

        logging.debug(f'source         >>>{source        }<<<')
        logging.debug(f'filterFilePath >>>{filterFilePath}<<<')
        logging.debug(f'verbose        >>>{verbose       }<<<')
        logging.debug(f'dryRun         >>>{dryRun        }<<<')

        cmdShell = CmdShell()

        symlinksVisited = []
        existingSymlinks = cmdShell.run('find ./ -type l').out
        if existingSymlinks:
            # Do not follow local existing symlinks
            symlinksVisited = existingSymlinks.strip().split('\n')
        logging.debug(f'symlinksVisited >>>{symlinksVisited}<<<')
        # rsync root of source directory supplied on command line
        (realPath, targets) = self._getRealPathAndSymlinks(source, {})
        self._runRsync(realPath, filterFilePath, verbose, dryRun)
        # If root of source directory tree is a symlink itself append it to list of visited links
        logging.debug(f"source  : '{source}'")
        logging.debug(f"realPath: '{realPath}'")
        if realPath != source:
            # cmdShell.run(f'ln -s {realPath} .{source}')
            symlinksVisited.append(f'.{source}')
        # Recursively detect all symlinks and rsync their targets
        finished = False
        while not finished:
            finished = True
            symlinksFound = cmdShell.run('find ./ -type l').out
            logging.debug(f'symlinksFound >>>{symlinksFound}<<<')
            logging.debug(f'symlinksVisited >>>{symlinksVisited}<<<')
            if symlinksFound:
                symlinksFound = symlinksFound.strip().split('\n')
                realPaths = []
                for symlink in symlinksFound:
                    if (symlink not in symlinksVisited
                            and symlink[1:] not in targets.keys()):  # skip leading '.'
                        logging.debug(f'symlink >>>{symlink}<<<')
                        linkTarget = os.readlink(symlink)
                        logging.debug(f'linkTarget >>>{linkTarget}<<<')
                        linkTarget = self._symlinkConvertRelToAbs(symlink, linkTarget)
                        (realPath, targets) = self._getRealPathAndSymlinks(linkTarget, targets)
                        if realPath not in realPaths:
                            realPaths.append(realPath)
                            logging.debug(f'realPaths: >>>{realPaths}<<<')
                        symlinksVisited.append(symlink)
                        finished = False
                if realPaths:
                    self._runRsync(realPaths, filterFilePath, verbose, dryRun)
        # Copy all symlinks that were not yet copied
        logging.debug('Final rsync call')
        self._runRsync([s for (s, t) in targets.items() if t],
                       filterFilePath, verbose, dryRun)
        logging.info(f"Remote copy of '{source}' finished.")