Example 1
    def __exit__(self, exec_ty, exec_val, tb):

        self._context = False
        if exec_ty is None:
            fd, tname = tempfile.mkstemp(dir=os.path.dirname(self._filename))
            try:
                oldlines, oldentries = self._getOldContent()
                with io.open(fd, 'w', encoding='utf8') as f:
                    if self._section:
                        self._writeSection(f)
                    f.writelines(oldlines)
                    if self._entries:
                        self._writeEntries(f, oldentries)

                os.rename(tname, self._filename)

                if self._oldmod != os.stat(self._filename).st_mode:
                    os.chmod(self._filename, self._oldmod)

                if utils.get_selinux_enforce_mode() > -1:
                    try:
                        selinux.restorecon(self._filename)
                    except OSError:
                        pass  # No default label for file
            finally:
                if os.path.exists(tname):
                    os.remove(tname)
Example 2
def selinux_restorecon(path):
    if have_selinux() and hasattr(selinux, "restorecon"):
        try:
            selinux.restorecon(path)
        except Exception as e:
            logging.debug("Restoring context for '%s' failed: %s",
                          path, str(e))
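The wrapper above assumes a have_selinux() helper that is not shown in this listing. A minimal sketch of such a guard, under the assumption that it only needs to report whether the selinux Python bindings are importable and active (the name and exact behavior are illustrative, not the original implementation):

try:
    import selinux  # Python bindings; missing on systems without SELinux
except ImportError:
    selinux = None

def have_selinux():
    # Assumed behavior: bindings importable and SELinux enabled in the kernel.
    return selinux is not None and selinux.is_selinux_enabled() > 0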
Example 3
    def __exit__(self, exec_ty, exec_val, tb):
        self._context = False
        if exec_ty is None:
            fd, tname = tempfile.mkstemp(dir=os.path.dirname(self._filename))
            try:
                oldlines = self._getOldContent()
                with io.open(fd, 'w', encoding='utf8') as f:
                    if self._section:
                        self._writeSection(f)
                    # if oldlines includes something that we have in
                    #  self._entries we need to write only the new value!
                    for fullline in oldlines:
                        line = fullline.replace(' ', '')
                        key = line.split("=")[0]
                        if key not in self._entries:
                            f.write(fullline)
                        else:
                            f.write(u'## commented out by vdsm\n')
                            f.write(u'# %s\n' % (fullline))
                    if self._entries:
                        self._writeEntries(f)

                os.rename(tname, self._filename)

                if self._oldmod != os.stat(self._filename).st_mode:
                    os.chmod(self._filename, self._oldmod)

                if utils.get_selinux_enforce_mode() > -1:
                    try:
                        selinux.restorecon(self._filename)
                    except OSError:
                        pass  # No default label for file
            finally:
                if os.path.exists(tname):
                    os.remove(tname)
Example 4
    def __exit__(self, exec_ty, exec_val, tb):

        self._context = False
        if exec_ty is None:
            fd, tname = tempfile.mkstemp(dir=os.path.dirname(self._filename))
            try:
                oldlines, oldentries = self._getOldContent()
                with os.fdopen(fd, 'w') as f:
                    if self._section:
                        self._writeSection(f)
                    f.writelines(oldlines)
                    if self._entries:
                        self._writeEntries(f, oldentries)

                if utils.isOvirtNode():
                    NodeCfg().unpersist(self._filename)
                os.rename(tname, self._filename)
                if utils.isOvirtNode():
                    NodeCfg().persist(self._filename)

                if self._oldmod != os.stat(self._filename).st_mode:
                    os.chmod(self._filename, self._oldmod)

                if selinux.is_selinux_enabled():
                    try:
                        selinux.restorecon(self._filename)
                    except OSError:
                        pass  # No default label for file
            finally:
                if os.path.exists(tname):
                    os.remove(tname)
Example 5
def symlink_atomically(srcpath, dstpath, force=False, preserve_context=True):
    """Create a symlink, optionally replacing dstpath atomically, optionally
    setting or preserving SELinux context."""

    dstdname = os.path.dirname(dstpath)
    dstbname = os.path.basename(dstpath)

    run_restorecon = False
    ctx = None

    if preserve_context and selinux.is_selinux_enabled() <= 0:
        preserve_context = False
    else:
        try:
            ret, ctx = selinux.lgetfilecon(dstpath)
            if ret < 0:
                raise RuntimeError("getfilecon(%r) failed" % dstpath)
        except OSError as e:
            if e.errno == errno.ENOENT:
                run_restorecon = True
            else:
                raise

    if not force:
        os.symlink(srcpath, dstpath)
        if preserve_context:
            selinux.restorecon(dstpath)
    else:
        dsttmp = None
        for attempt in range(tempfile.TMP_MAX):
            _dsttmp = tempfile.mktemp(
                prefix=dstbname + os.extsep, dir=dstdname)
            try:
                os.symlink(srcpath, _dsttmp)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    # try again
                    continue
                raise
            else:
                dsttmp = _dsttmp
                break

        if dsttmp is None:
            raise IOError(
                errno.EEXIST,
                "No suitable temporary symlink could be created.")

        if preserve_context and not run_restorecon:
            selinux.lsetfilecon(dsttmp, ctx)

        try:
            os.rename(dsttmp, dstpath)
        except:
            # clean up
            os.remove(dsttmp)
            raise

        if run_restorecon:
            selinux.restorecon(dstpath)
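A hedged usage sketch for symlink_atomically (the paths below are invented for illustration): replacing an existing "current" link in place while keeping whatever SELinux label it already carried:

    symlink_atomically('/opt/myapp/releases/v2', '/opt/myapp/current',
                       force=True, preserve_context=True)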
Example 6
File: base.py Project: fennm/bcfg2
    def _set_secontext(self, entry, path=None):  # pylint: disable=R0911
        """ set the SELinux context of the file on disk according to the
        config"""
        if not HAS_SELINUX:
            return True

        if path is None:
            path = entry.get("name")
        context = entry.get("secontext")
        if not context:
            # no context listed
            return True
        secontext = selinux.lgetfilecon(path)[1].split(":")[2]
        if secontext in Bcfg2.Options.setup.secontext_ignore:
            return True
        try:
            if context == '__default__':
                selinux.restorecon(path)
                return True
            else:
                return selinux.lsetfilecon(path, context) == 0
        except OSError:
            err = sys.exc_info()[1]
            if err.errno == errno.EOPNOTSUPP:
                # Operation not supported
                if context != '__default__':
                    self.logger.debug("POSIX: Failed to set SELinux context "
                                      "for %s: %s" % (path, err))
                    return False
                return True
            err = sys.exc_info()[1]
            self.logger.error("POSIX: Failed to set or restore SELinux "
                              "context for %s: %s" % (path, err))
            return False
Example 7
    def _set_secontext(self, entry, path=None):
        """ set the SELinux context of the file on disk according to the
        config"""
        if not HAS_SELINUX:
            return True

        if path is None:
            path = entry.get("name")
        context = entry.get("secontext")
        if not context:
            # no context listed
            return True

        if context == '__default__':
            try:
                selinux.restorecon(path)
                rv = True
            except OSError:
                err = sys.exc_info()[1]
                self.logger.error("POSIX: Failed to restore SELinux context "
                                  "for %s: %s" % (path, err))
                rv = False
        else:
            try:
                rv = selinux.lsetfilecon(path, context) == 0
            except OSError:
                err = sys.exc_info()[1]
                self.logger.error("POSIX: Failed to restore SELinux context "
                                  "for %s: %s" % (path, err))
                rv = False
        return rv
Example 8
def overwrite_safely(path, content, preserve_mode=True, preserve_context=True):
    """Safely overwrite a file by creating a temporary file in the same
    directory, writing it, moving it over the original file, and optionally
    preserving the file mode and SELinux context."""

    path = os.path.realpath(path)
    dir_ = os.path.dirname(path)
    base = os.path.basename(path)

    fd = None
    f = None
    tmpname = None

    exists = os.path.exists(path)

    if preserve_context and selinux.is_selinux_enabled() <= 0:
        preserve_context = False

    try:
        fd, tmpname = tempfile.mkstemp(prefix=base + os.path.extsep,
                                       dir=dir_)

        if exists and preserve_mode:
            shutil.copymode(path, tmpname)

        if exists and preserve_context:
            ret, ctx = selinux.getfilecon(path)
            if ret < 0:
                raise RuntimeError("getfilecon(%r) failed" % path)

        f = os.fdopen(fd, "w")
        fd = None

        f.write(content)

        f.close()
        f = None

        os.rename(tmpname, path)

        if preserve_context:
            if exists:
                selinux.setfilecon(path, ctx)
            else:
                selinux.restorecon(path)

    finally:
        if f:
            f.close()
        elif fd:
            os.close(fd)
        if tmpname and os.path.isfile(tmpname):
            try:
                os.unlink(tmpname)
            except:
                pass
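A usage sketch for overwrite_safely (the path and content are invented): because the data is written to a temporary file in the same directory and only then renamed over the target, readers never observe a partially written configuration file:

    overwrite_safely('/etc/myapp/myapp.conf', 'listen = 127.0.0.1\n',
                     preserve_mode=True, preserve_context=True)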
Example 9
 def writeConfFile(self, fileName, configuration):
     """Backs up the previous contents of the file referenced by fileName
     writes the new configuration and sets the specified access mode."""
     self._backup(fileName)
     open(fileName, "w").write(configuration)
     os.chmod(fileName, 0o664)
     try:
         selinux.restorecon(fileName)
     except:
         logging.debug("ignoring restorecon error in case " "SElinux is disabled", exc_info=True)
Example 10
def silent_restorecon(path):
    """Execute selinux restorecon cmd to determined file
    Args
    path -- full path to file
    """

    try:
        if selinux.is_selinux_enabled():
            selinux.restorecon(path)
    except:
        __PRINT_AND_LOG("restorecon {p} failed".format(p=path), "error")
Example 11
def _silent_restorecon(path):
    """Execute selinux restorecon cmd to determined file

    Args
    path -- full path to file
    """

    try:
        if selinux.is_selinux_enabled():
            selinux.restorecon(path)
    except:
        _LOG.error("restorecon %s failed" % path)
Example 12
    def _silent_restorecon(self, path):
        """
        Execute the selinux restorecon command on the given file.

        Args
        path -- full path to file
        """

        try:
            selinux.restorecon(path)
        except:
            self.logger.error("restorecon %s failed" % path, exc_info=True)
Example 13
def copyfile(srcpath, dstpath, copy_mode_from_dst=True, run_restorecon=True):
    """Copy srcpath to dstpath.

    Abort operation if e.g. not enough space is available.  Attempt to
    atomically replace dstpath if it exists."""

    if issamefile(srcpath, dstpath, catch_stat_exceptions=OSError):
        return

    dstpath = os.path.abspath(dstpath)
    dstdname = os.path.dirname(dstpath)
    dstbname = os.path.basename(dstpath)

    srcfile = open(srcpath, "rb")
    dsttmpfile = tempfile.NamedTemporaryFile(
        prefix=dstbname + os.path.extsep, dir=dstdname, delete=False)

    mode_copied = False

    if copy_mode_from_dst:

        # attempt to copy mode from destination file (if it exists,
        # otherwise fall back to copying it from the source file below)

        try:
            shutil.copymode(dstpath, dsttmpfile.name)
            mode_copied = True
        except (shutil.Error, OSError):
            pass

    if not mode_copied:
        shutil.copymode(srcpath, dsttmpfile.name)

    data = None

    while data != "":
        data = srcfile.read(BLOCKSIZE)
        try:
            dsttmpfile.write(data)
        except:
            srcfile.close()
            dsttmpfile.close()
            os.unlink(dsttmpfile.name)
            raise

    srcfile.close()
    dsttmpfile.close()

    os.rename(dsttmpfile.name, dstpath)

    if run_restorecon and selinux.is_selinux_enabled() > 0:
        selinux.restorecon(dstpath)
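A usage sketch for copyfile (paths invented for illustration): staging a new file over an existing one, inheriting the mode of the file being replaced and relabeling the result when SELinux is enabled:

    copyfile('/tmp/staged/myapp.conf', '/etc/myapp/myapp.conf',
             copy_mode_from_dst=True, run_restorecon=True)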
Example 14
File: lvm.py Project: nirs/vdsm
def _install_file(src, dst):
    _log("Installing %s at %s", src, dst)
    tmpfile = _LVMLOCAL_CUR + ".tmp"
    shutil.copyfile(_LVMLOCAL_VDSM, tmpfile)
    try:
        selinux.restorecon(tmpfile)
        os.chmod(tmpfile, 0o644)
        os.rename(tmpfile, _LVMLOCAL_CUR)
    except:
        try:
            os.unlink(tmpfile)
        except Exception:
            _log("ERROR: cannot remove temporary file: %s", tmpfile)
        raise
Example 15
 def writeConfFile(self, fileName, configuration):
     '''Backs up the previous contents of the file referenced by fileName,
     writes the new configuration, and sets the specified access mode.'''
     self._backup(fileName)
     logging.debug('Writing to file %s configuration:\n%s' % (fileName,
                   configuration))
     with open(fileName, 'w') as confFile:
         confFile.write(configuration)
     os.chmod(fileName, 0o664)
     try:
         selinux.restorecon(fileName)
     except:
         logging.debug('ignoring restorecon error in case '
                       'SElinux is disabled', exc_info=True)
Example 16
    def _getSSH(self):
        pkihelper = pkissh.PKIHelper()
        authorized_keys_line = pkihelper.getSSHkey(
            fqdn=self.environment[
                ohostedcons.NetworkEnv.OVIRT_HOSTED_ENGINE_FQDN
            ],
            ca_certs=self.environment[
                ohostedcons.EngineEnv.TEMPORARY_CERT_FILE
            ],
        )

        authorized_keys_file = os.path.join(
            os.path.expanduser('~root'),
            '.ssh',
            'authorized_keys'
        )

        content = pkihelper.mergeAuthKeysFile(
            authorized_keys_file, authorized_keys_line
        )
        with transaction.Transaction() as localtransaction:
            localtransaction.append(
                filetransaction.FileTransaction(
                    name=authorized_keys_file,
                    content=content,
                    mode=0o600,
                    owner='root',
                    enforcePermissions=True,
                    modifiedList=self.environment[
                        otopicons.CoreEnv.MODIFIED_FILES
                    ],
                )
            )

        if self._selinux_enabled:
            path = os.path.join(
                os.path.expanduser('~root'),
                '.ssh'
            )
            try:
                selinux.restorecon(path, recursive=True)
            except OSError as ex:
                self.logger.error(
                    _(
                        'Failed to refresh SELINUX context for {path}: {ex}'
                    ).format(
                        path=path,
                        ex=ex.message,
                    )
                )
Example 17
File: api.py Project: kanalun/vdsm
def updateGeoRepKeys(userName, geoRepPubKeys):
    try:
        userInfo = getpwnam(userName)
        homeDir = userInfo[5]
        uid = userInfo[2]
        gid = userInfo[3]
    except KeyError as e:
        raise ge.GlusterGeoRepUserNotFoundException(err=[str(e)])

    sshDir = homeDir + "/.ssh"
    authKeysFile = sshDir + "/authorized_keys"

    if not os.path.exists(sshDir):
        try:
            os.makedirs(sshDir, 0o700)
            os.chown(sshDir, uid, gid)
            if selinux.is_selinux_enabled():
                selinux.restorecon(sshDir)
        except OSError as e:
            raise ge.GlusterGeoRepPublicKeyWriteFailedException(err=[str(e)])

    newKeys = [" ".join(l.split()[:-1]) for l in geoRepPubKeys]
    newKeyDict = dict(zip(newKeys, geoRepPubKeys))

    try:
        with open(authKeysFile) as f:
            existingKeyLines = f.readlines()
    except IOError as e:
        if e.errno == errno.ENOENT:
            existingKeyLines = []
        else:
            raise ge.GlusterGeoRepPublicKeyWriteFailedException(err=[str(e)])

    try:
        existingKeys = [" ".join(l.split()[:-1]) for l in existingKeyLines]
        existingKeyDict = dict(zip(existingKeys, existingKeyLines))

        outLines = existingKeyLines
        outKeys = set(newKeyDict).difference(set(existingKeyDict))
        outLines.extend([newKeyDict[k] for k in outKeys if newKeyDict[k]])

        safeWrite(authKeysFile, ''.join(outLines))
        os.chmod(authKeysFile, 0o600)
        os.chown(authKeysFile, uid, gid)
        if selinux.is_selinux_enabled():
            selinux.restorecon(authKeysFile)
    except (IOError, OSError) as e:
        raise ge.GlusterGeoRepPublicKeyWriteFailedException(err=[str(e)])
Example 18
 def reset(self):
     root=self.graphdir
     try:
         self.d.info()
         raise ValueError("Docker daemon must be stopped before resetting storage")
     except (NoDockerDaemon, requests.exceptions.ConnectionError):
         pass
     util.check_call(["docker-storage-setup", "--reset"], stdout=DEVNULL)
     util.call(["umount", root + "/devicemapper"], stderr=DEVNULL)
     util.call(["umount", root + "/overlay"], stderr=DEVNULL)
     util.call(["umount", root + "/overlay2"], stderr=DEVNULL)
     shutil.rmtree(root)
     os.mkdir(root)
     try:
         selinux.restorecon(root.encode("utf-8"))
     except (TypeError, OSError):
         selinux.restorecon(root)
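The nested restorecon calls above handle selinux bindings that disagree on whether the path argument should be bytes or str. A small helper capturing the same fallback, shown here only as an illustrative sketch (the helper name is an assumption):

def _restorecon_compat(path):
    # Prefer the bytes form; fall back to the plain string if the bindings
    # reject it, mirroring the TypeError/OSError handling in the snippet above.
    try:
        selinux.restorecon(path.encode('utf-8'))
    except (TypeError, OSError):
        selinux.restorecon(path)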
Example 19
File: lvm.py Project: EdDev/vdsm
def _install_file(src, dst):
    _log("Installing %s at %s", src, dst)
    tmpfile = _LVMLOCAL_CUR + ".tmp"
    shutil.copyfile(_LVMLOCAL_VDSM, tmpfile)
    try:
        # TODO: remove when we require selinux version that does not explode
        # when selinux is disabled.
        if selinux.is_selinux_enabled():
            selinux.restorecon(tmpfile)
        os.chmod(tmpfile, 0o644)
        os.rename(tmpfile, _LVMLOCAL_CUR)
    except:
        try:
            os.unlink(tmpfile)
        except Exception:
            _log("ERROR: cannot remove temporary file: %s", tmpfile)
        raise
Example 20
File: ifcfg.py Project: nirs/vdsm
    def writeConfFile(self, fileName, configuration):
        """Backs up the previous contents of the file referenced by fileName
        writes the new configuration and sets the specified access mode."""
        self._backup(fileName)
        configuration = self.CONFFILE_HEADER + "\n" + configuration

        logging.debug("Writing to file %s configuration:\n%s", fileName, configuration)
        with open(fileName, "w") as confFile:
            confFile.write(configuration)
        os.chmod(fileName, 0o664)

        try:
            # fileName can be of 'unicode' type. restorecon calls into a C API
            # that needs a char *. Thus, it is necessary to encode unicode to
            # a utf-8 string.
            selinux.restorecon(fileName.encode("utf-8"))
        except:
            logging.debug("ignoring restorecon error in case " "SElinux is disabled", exc_info=True)
Example 21
def configure():
    """
    Set up the multipath daemon configuration to the known and
    supported state. The original configuration, if any, is saved.
    """

    if os.path.exists(_CONF_FILE):
        backup = _CONF_FILE + '.' + time.strftime("%Y%m%d%H%M")
        shutil.copyfile(_CONF_FILE, backup)
        sys.stdout.write("Backup previous multipath.conf to %r\n" % backup)
        utils.persist(backup)

    with tempfile.NamedTemporaryFile(
            mode="wb",
            prefix=os.path.basename(_CONF_FILE) + ".tmp",
            dir=os.path.dirname(_CONF_FILE),
            delete=False) as f:
        try:
            f.write(_CONF_DATA)
            f.flush()
            if selinux.is_selinux_enabled():
                selinux.restorecon(f.name)
            os.chmod(f.name, 0o644)
            # On ovirt node multipath.conf is a bind mount and rename will fail
            # if we do not unpersist first, making this non-atomic.
            utils.unpersist(_CONF_FILE)
            os.rename(f.name, _CONF_FILE)
        except:
            os.unlink(f.name)
            raise

    utils.persist(_CONF_FILE)

    # Flush all unused multipath device maps
    utils.execCmd([constants.EXT_MULTIPATH, "-F"])

    try:
        service.service_reload("multipathd")
    except service.ServiceOperationError:
        status = service.service_status("multipathd", False)
        if status == 0:
            raise
Example 22
def hookAdd(glusterCmd, hookLevel, hookName, hookData, hookMd5Sum,
            enable=False):
    hookPath = os.path.join(_glusterHooksPath, glusterCmd, hookLevel)
    try:
        os.makedirs(hookPath)
        if selinux.is_selinux_enabled():
            try:
                selinux.restorecon(hookPath, recursive=True)
            except OSError:
                logging.error('restorecon %s failed', hookPath, exc_info=True)
    except OSError as e:
        if e.errno != errno.EEXIST:
            errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename)
            raise ge.GlusterHookAddFailedException(err=[errMsg])

    try:
        return _hookUpdateOrAdd(glusterCmd, hookLevel, hookName, hookData,
                                hookMd5Sum, update=False, enable=enable)
    except IOError as e:
        errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename)
        raise ge.GlusterHookAddFailedException(err=[errMsg])
Example 23
def configure():
    """
    Set up the multipath daemon configuration to the known and
    supported state. The original configuration, if any, is saved.
    """

    if os.path.exists(_CONF_FILE):
        backup = _CONF_FILE + '.' + time.strftime("%Y%m%d%H%M")
        shutil.copyfile(_CONF_FILE, backup)
        sys.stdout.write("Backup previous multipath.conf to %r\n" % backup)

    with tempfile.NamedTemporaryFile(
            mode="wb",
            prefix=os.path.basename(_CONF_FILE) + ".tmp",
            dir=os.path.dirname(_CONF_FILE),
            delete=False) as f:
        try:
            f.write(_CONF_DATA)
            f.flush()
            selinux.restorecon(f.name)
            os.chmod(f.name, 0o644)
            os.rename(f.name, _CONF_FILE)
        except:
            os.unlink(f.name)
            raise

    # Flush all unused multipath device maps. 'multipath'
    # returns 1 if any of the devices is in use and unable to flush.
    try:
        commands.run([constants.EXT_MULTIPATH, "-F"])
    except cmdutils.Error:
        pass

    try:
        service.service_reload("multipathd")
    except service.ServiceOperationError:
        status = service.service_status("multipathd", False)
        if status == 0:
            raise
Example 24
def _safeWrite(fname, s):
    "Write s into fname atomically"

    t = tempfile.NamedTemporaryFile(delete=False)
    t.write(s)
    t.close()

    try:
        oldstat = os.stat(fname)
    except:
        oldstat = None

    shutil.move(t.name, fname)

    try:
        if oldstat is not None:
            os.chmod(fname, oldstat.st_mode)
            os.chown(fname, oldstat.st_uid, oldstat.st_gid)

        import selinux
        selinux.restorecon(fname)
    except OSError:
        logging.debug('trying to maintain file permissions', exc_info=True)
Example 25
    def writeConfFile(self, fileName, configuration):
        '''Backs up the previous contents of the file referenced by fileName,
        writes the new configuration, and sets the specified access mode.'''
        self._backup(fileName)
        configuration = self.CONFFILE_HEADER + '\n' + configuration

        # make sure that ifcfg files are never persisted by the node
        if self.unifiedPersistence and utils.isOvirtNode():
            node_fs.Config().unpersist(fileName)

        logging.debug('Writing to file %s configuration:\n%s', fileName,
                      configuration)
        with open(fileName, 'w') as confFile:
            confFile.write(configuration)
        os.chmod(fileName, 0o664)
        try:
            # fileName can be of 'unicode' type. restorecon calls into a C API
            # that needs a char *. Thus, it is necessary to encode unicode to
            # a utf-8 string.
            selinux.restorecon(fileName.encode('utf-8'))
        except:
            logging.debug('ignoring restorecon error in case '
                          'SElinux is disabled', exc_info=True)
Example 26
File: nssdb.py Project: tiran/pki
    def convert_db(self):
        dbtype = self.get_dbtype()
        if dbtype is None:
            raise ValueError(
                "NSS database {} does not exist".format(self.directory)
            )
        elif dbtype == 'sql':
            raise ValueError(
                "NSS database {} already in SQL format".format(self.directory)
            )

        logger.info(
            "Convert NSSDB %s from DBM to SQL format", self.directory
        )

        basecmd = [
            'certutil',
            '-d', 'sql:{}'.format(self.directory),
            '-f', self.password_file,
        ]
        # See https://fedoraproject.org/wiki/Changes/NSSDefaultFileFormatSql
        cmd = basecmd + [
            '-N',
            '-@', self.password_file
        ]

        logger.debug('Command: %s', ' '.join(map(str, cmd)))
        subprocess.check_call(cmd)

        migration = (
            ('cert8.db', 'cert9.db'),
            ('key3.db', 'key4.db'),
            ('secmod.db', 'pkcs11.txt'),
        )

        for oldname, newname in migration:
            oldname = os.path.join(self.directory, oldname)
            newname = os.path.join(self.directory, newname)
            oldstat = os.stat(oldname)
            os.chmod(newname, stat.S_IMODE(oldstat.st_mode))
            os.chown(newname, oldstat.st_uid, oldstat.st_gid)

        if selinux is not None and selinux.is_selinux_enabled():
            selinux.restorecon(self.directory, recursive=True)

        # list certs to verify DB
        if self.get_dbtype() != 'sql':
            raise RuntimeError(
                "Migration of NSS database {} was not successfull.".format(
                    self.directory
                )
            )

        with open(os.devnull, 'wb') as f:
            subprocess.check_call(basecmd + ['-L'], stdout=f)

        for oldname, _ in migration:  # pylint: disable=unused-variable
            oldname = os.path.join(self.directory, oldname)
            os.rename(oldname, oldname + '.migrated')

        logger.info("Migration successful")
Example 27
 def restorecon(self, abspath):
     try:
         selinux.restorecon(abspath)
     except OSError:
         self._logger.warning('No default label: "%s"', abspath)
Example 28
    def convert_db(self):
        dbtype = self.get_dbtype()
        if dbtype is None:
            raise ValueError(
                "NSS database {} does not exist".format(self.directory)
            )
        elif dbtype == 'sql':
            raise ValueError(
                "NSS database {} already in SQL format".format(self.directory)
            )

        logger.info(
            "Convert NSSDB %s from DBM to SQL format", self.directory
        )

        basecmd = [
            'certutil',
            '-d', 'sql:{}'.format(self.directory),
            '-f', self.password_file,
        ]
        # See https://fedoraproject.org/wiki/Changes/NSSDefaultFileFormatSql
        cmd = basecmd + [
            '-N',
            '-@', self.password_file
        ]

        logger.debug('Command: %s', ' '.join(map(str, cmd)))
        subprocess.check_call(cmd)

        migration = (
            ('cert8.db', 'cert9.db'),
            ('key3.db', 'key4.db'),
            ('secmod.db', 'pkcs11.txt'),
        )

        for oldname, newname in migration:
            oldname = os.path.join(self.directory, oldname)
            newname = os.path.join(self.directory, newname)
            oldstat = os.stat(oldname)
            os.chmod(newname, stat.S_IMODE(oldstat.st_mode))
            os.chown(newname, oldstat.st_uid, oldstat.st_gid)

        if selinux is not None and selinux.is_selinux_enabled():
            selinux.restorecon(self.directory, recursive=True)

        # list certs to verify DB
        if self.get_dbtype() != 'sql':
            raise RuntimeError(
                "Migration of NSS database {} was not successfull.".format(
                    self.directory
                )
            )

        with open(os.devnull, 'wb') as f:
            subprocess.check_call(basecmd + ['-L'], stdout=f)

        for oldname, _ in migration:  # pylint: disable=unused-variable
            oldname = os.path.join(self.directory, oldname)
            os.rename(oldname, oldname + '.migrated')

        logger.info("Migration successful")
Example 29
def process_tb(config, logger, receive_dir, qdir_md5, duplicates, errors):

    # Check for results that are ready for processing: version 002 agents
    # upload the MD5 file as xxx.md5.check and they rename it to xxx.md5
    # after they are done with MD5 checking so that's what we look for.
    list_check = glob.glob(
        os.path.join(receive_dir, "**", "*.tar.xz.md5"), recursive=True
    )

    archive = config.ARCHIVE
    logger.info("{}", config.TS)
    list_check.sort()
    nstatus = ""

    ntotal = ntbs = nerrs = nquarantined = ndups = 0

    for tbmd5 in list_check:
        ntotal += 1

        # full pathname of tarball
        tb = Path(tbmd5[0:-4])
        tbmd5 = Path(tbmd5)

        # directory
        tbdir = tb.parent

        # resultname: get the basename foo.tar.xz and then strip the .tar.xz
        resultname = tb.name

        controller = tbdir.name
        dest = archive / controller

        # Create a new dataset tracker in UPLOADING state, and add it to the
        # database.
        #
        # NOTE: Technically, this particular workflow has no "UPLOADING" as
        # the `pbench-server-prep-shim-002` command isn't invoked until the
        # tarball and MD5 has been entirely uploaded by the agent via `ssh`;
        # this method however can't be supported once we have authorized user
        # ownership, and the model fits the server `PUT` method where an
        # unexpected termination could leave a tarball in "Uploading" state.
        #
        # TODO: We have no way to identify an owner here, so assign it to
        # the arbitrary "pbench" user. This will go away when we drop this
        # component entirely in favor of PUT.
        try:
            dataset = Dataset.create(
                controller=controller, path=resultname, owner="pbench"
            )
        except DatasetError as e:
            logger.error(
                "Unable to create dataset {}>{}: {}", controller, resultname, str(e)
            )
            # TODO: Should we quarantine over this? Note it's not quite
            # straightforward, as quarantine() expects that the Dataset has
            # been created, so we'll get a cascade failure. Since prep-shim's
            # days are numbered, I'm inclined not to worry about it here.
            dataset = None

        if all([(dest / resultname).is_file(), (dest / tbmd5.name).is_file()]):
            logger.error("{}: Duplicate: {} duplicate name", config.TS, tb)
            quarantine((duplicates / controller), logger, tb, tbmd5)
            ndups += 1
            continue

        archive_tar_hex_value, archive_md5_hex_value = md5_check(tb, tbmd5, logger)
        if any(
            [
                archive_tar_hex_value != archive_md5_hex_value,
                archive_tar_hex_value is None,
                archive_md5_hex_value is None,
            ]
        ):
            logger.error("{}: Quarantined: {} failed MD5 check", config.TS, tb)
            logger.info("{}: FAILED", tb.name)
            logger.info("md5sum: WARNING: 1 computed checksum did NOT match")
            quarantine((qdir_md5 / controller), logger, tb, tbmd5)
            nquarantined += 1
            continue

        if dataset:
            try:
                dataset.md5 = archive_md5_hex_value
                dataset.update()
            except DatasetError as e:
                logger.warn(
                    "Unable to update dataset {} with md5: {}", str(dataset), str(e)
                )

        # make the destination directory and its TODO subdir if necessary.
        try:
            os.makedirs(dest / "TODO")
        except FileExistsError:
            # directory already exists, ignore
            pass
        except Exception:
            logger.error("{}: Error in creating TODO directory.", config.TS)
            quarantine(os.path.join(errors, controller), logger, tb, tbmd5)
            nerrs += 1
            continue

        # First, copy the small .md5 file to the destination. That way, if
        # that operation fails it will fail quickly since the file is small.
        try:
            shutil.copy2(tbmd5, dest)
        except Exception:
            logger.error(
                "{}: Error in copying .md5 file to Destination path.", config.TS
            )
            try:
                os.remove(dest / tbmd5.name)
            except FileNotFoundError:
                logger.error(
                    "{}: Warning: cleanup of copy failure failed itself.", config.TS
                )
            quarantine((errors / controller), logger, tb, tbmd5)
            nerrs += 1
            continue

        # Next, mv the "large" tar ball to the destination. If the destination
        # is on the same device, the move should be quick. If the destination is
        # on a different device, the move will be a copy and delete, and will
        # take a bit longer.  If it fails, the file will NOT be at the
        # destination.
        try:
            shutil.move(str(tb), str(dest))
        except Exception:
            logger.error(
                "{}: Error in moving tarball file to Destination path.", config.TS
            )
            try:
                os.remove(dest / resultname)
            except FileNotFoundError:
                logger.error(
                    "{}: Warning: cleanup of copy failure failed itself.", config.TS
                )
            quarantine((errors / controller), logger, tb, tbmd5)
            nerrs += 1
            continue

        # Restore the SELinux context properly
        try:
            selinux.restorecon(dest / tb.name)
            selinux.restorecon(dest / tbmd5.name)
        except Exception as e:
            # log it but do not abort
            logger.error("{}: Error: 'restorecon {}', {}", config.TS, dest / tb.name, e)

        # Now that we have successfully moved the tar ball and its .md5 to the
        # destination, we can remove the original .md5 file.
        try:
            os.remove(tbmd5)
        except Exception as exc:
            logger.error(
                "{}: Warning: cleanup of successful copy operation failed: '{}'",
                config.TS,
                exc,
            )

        try:
            os.symlink((dest / resultname), (dest / "TODO" / resultname))
        except Exception as exc:
            logger.error("{}: Error in creation of symlink. '{}'", config.TS, exc)
            # if we fail to make the link, we quarantine the (already moved)
            # tarball and .md5.
            quarantine(
                (errors / controller), logger, (dest / tb), (dest / tbmd5),
            )
            nerrs += 1
            continue

        ntbs += 1

        try:
            if dataset:
                dataset.advance(States.UPLOADED)
        except Exception:
            logger.exception("Unable to finalize {}", dataset)

        nstatus = f"{nstatus}{config.TS}: processed {tb}\n"
        logger.info(f"{tb.name}: OK")

    return Results(
        nstatus=nstatus,
        ntotal=ntotal,
        ntbs=ntbs,
        nquarantined=nquarantined,
        ndups=ndups,
        nerrs=nerrs,
    )
Example 30
 def _generateSPICEcerts(self):
     # 'https://fedoraproject.org/wiki/
     # QA:Testcase_Virtualization_Manually_
     # set_spice_listening_port_with_TLS_port_set'
     self.logger.info(_('Generating libvirt-spice certificates'))
     self._tmpdir = tempfile.mkdtemp()
     expire = '1095'  # FIXME: configurable?
     for key in ('ca-key.pem', 'server-key.pem'):
         self.execute((self.command.get('openssl'), 'genrsa', '-out',
                       os.path.join(self._tmpdir, key), '1024'),
                      raiseOnError=True)
     self.execute(
         (self.command.get('openssl'), 'req', '-new', '-x509', '-days',
          expire, '-key', os.path.join(self._tmpdir, 'ca-key.pem'), '-out',
          os.path.join(self._tmpdir, 'ca-cert.pem'), '-subj',
          self.environment[ohostedcons.VDSMEnv.CA_SUBJECT]),
         raiseOnError=True)
     self.execute((self.command.get('openssl'), 'req', '-new', '-key',
                   os.path.join(self._tmpdir, 'server-key.pem'), '-out',
                   os.path.join(self._tmpdir, 'server-key.csr'), '-subj',
                   self.environment[ohostedcons.VDSMEnv.PKI_SUBJECT]),
                  raiseOnError=True)
     self.execute((
         self.command.get('openssl'),
         'x509',
         '-req',
         '-days',
         expire,
         '-in',
         os.path.join(self._tmpdir, 'server-key.csr'),
         '-CA',
         os.path.join(self._tmpdir, 'ca-cert.pem'),
         '-CAkey',
         os.path.join(self._tmpdir, 'ca-key.pem'),
         '-set_serial',
         '01',
         '-out',
         os.path.join(self._tmpdir, 'server-cert.pem'),
     ),
                  raiseOnError=True)
     pem_files = glob.glob(os.path.join(self._tmpdir, '*.pem'))
     cert_dir = os.path.dirname(
         ohostedcons.FileLocations.LIBVIRT_SPICE_SERVER_CERT)
     if not os.path.exists(cert_dir):
         os.makedirs(cert_dir)
     for src in pem_files:
         dest = os.path.join(cert_dir, os.path.basename(src))
         shutil.move(src, dest)
         os.chmod(dest, 0o640)
         os.chown(dest, self.environment[ohostedcons.VDSMEnv.VDSM_UID],
                  self.environment[ohostedcons.VDSMEnv.KVM_GID])
     if self._selinux_enabled:
         try:
             selinux.restorecon(cert_dir, recursive=True)
         except OSError as ex:
             self.logger.error(
                 _('Failed to refresh SELINUX context for {path}: {ex}').
                 format(
                     path=cert_dir,
                     ex=ex.message,
                 ))
Example 31
 def restorecon(self, path):
     selinux.restorecon(str(path), recursive=1)
Example 32
 def restore_context(self, mdict):
     selinux.restorecon(mdict['pki_instance_path'], True)
     selinux.restorecon(config.PKI_DEPLOYMENT_LOG_ROOT, True)
     selinux.restorecon(mdict['pki_instance_log_path'], True)
     selinux.restorecon(mdict['pki_instance_configuration_path'], True)
Example 33
def createBrick(brickName,
                mountPoint,
                devNameList,
                fsType=DEFAULT_FS_TYPE,
                raidParams={}):
    def _getDeviceList(devNameList):
        return [
            blivetEnv.devicetree.getDeviceByName(devName.split("/")[-1])
            for devName in devNameList
        ]

    def _createPV(deviceList, alignment):
        for dev in deviceList:
            # bz#1178705: Blivet always creates pv with 1MB dataalignment
            # Workaround: Till blivet fixes the issue, we use lvm pvcreate
            rc, out, err = commands.execCmd([
                _pvCreateCommandPath.cmd, '--dataalignment',
                '%sk' % alignment, dev.path
            ])
            if rc:
                raise ge.GlusterHostStorageDevicePVCreateFailedException(
                    dev.path, alignment, rc, out, err)
        _reset_blivet(blivetEnv)
        return _getDeviceList([dev.name for dev in deviceList])

    def _createVG(vgName, deviceList, stripeSize):
        # bz#1198568: Blivet always creates vg with 1MB stripe size
        # Workaround: Till blivet fixes the issue, use vgcreate command
        devices = ','.join([device.path for device in deviceList])
        rc, out, err = commands.execCmd([
            _vgCreateCommandPath.cmd, '-s',
            '%sk' % stripeSize, vgName, devices
        ])
        if rc:
            raise ge.GlusterHostStorageDeviceVGCreateFailedException(
                vgName, devices, stripeSize, rc, out, err)
        blivetEnv.reset()
        return blivetEnv.devicetree.getDeviceByName(vgName)

    def _createThinPool(poolName, vg, alignment, poolMetaDataSize,
                        poolDataSize):
        metaName = "meta-%s" % poolName
        vgPoolName = "%s/%s" % (vg.name, poolName)
        metaLv = LVMLogicalVolumeDevice(metaName,
                                        parents=[vg],
                                        size=blivet.size.Size(
                                            '%d KiB' % poolMetaDataSize))
        poolLv = LVMLogicalVolumeDevice(poolName,
                                        parents=[vg],
                                        size=blivet.size.Size('%d KiB' %
                                                              poolDataSize))
        blivetEnv.createDevice(metaLv)
        blivetEnv.createDevice(poolLv)
        blivetEnv.doIt()

        # bz#1100514: LVM2 currently only supports physical extent sizes
        # that are a power of 2. Till that support is available we need
        # to use lvconvert to achieve that.
        # bz#1179826: blivet doesn't support lvconvert functionality.
        # Workaround: Till the bz gets fixed, lvconvert command is used
        rc, out, err = commands.execCmd([
            _lvconvertCommandPath.cmd, '--chunksize',
            '%sK' % alignment, '--thinpool', vgPoolName, '--poolmetadata',
            "%s/%s" % (vg.name, metaName), '--poolmetadataspar', 'n', '-y'
        ])

        if rc:
            raise ge.GlusterHostStorageDeviceLVConvertFailedException(
                vg.path, alignment, rc, out, err)
        rc, out, err = commands.execCmd(
            [_lvchangeCommandPath.cmd, '--zero', 'n', vgPoolName])
        if rc:
            raise ge.GlusterHostStorageDeviceLVChangeFailedException(
                vgPoolName, rc, out, err)
        _reset_blivet(blivetEnv)
        return blivetEnv.devicetree.getDeviceByName(poolLv.name)

    if os.path.ismount(mountPoint):
        raise ge.GlusterHostStorageMountPointInUseException(mountPoint)

    vgName = "vg-" + brickName
    poolName = "pool-" + brickName
    poolDataSize = 0
    count = 0
    raidType = raidParams.get('type')
    metaDataSizeKib = DEFAULT_METADATA_SIZE_KB
    if raidType == '6':
        count = raidParams['pdCount'] - 2
        alignment = raidParams['stripeSize'] * count
        chunkSize = alignment
    elif raidType == '10':
        count = raidParams['pdCount'] // 2
        alignment = raidParams['stripeSize'] * count
        chunkSize = DEFAULT_CHUNK_SIZE_KB
    else:  # Device type is JBOD
        alignment = DEFAULT_CHUNK_SIZE_KB
        chunkSize = DEFAULT_CHUNK_SIZE_KB

    blivetEnv = blivet.Blivet()
    _reset_blivet(blivetEnv)

    # get the devices list from the device name
    deviceList = _getDeviceList(devNameList)

    # raise an error when any device not actually found in the given list
    notFoundList = set(devNameList).difference(
        set([dev.name for dev in deviceList]))
    if notFoundList:
        raise ge.GlusterHostStorageDeviceNotFoundException(notFoundList)

    # raise an error when any device is used already in the given list
    inUseList = set(devNameList).difference(
        set([not _canCreateBrick(dev) or dev.name for dev in deviceList]))
    if inUseList:
        raise ge.GlusterHostStorageDeviceInUseException(inUseList)

    pvDeviceList = _createPV(deviceList, alignment)
    vg = _createVG(vgName, pvDeviceList, alignment)
    # The following calculation is based on the redhat storage performance doc
    # http://docbuilder.usersys.redhat.com/22522
    # /#chap-Configuring_Red_Hat_Storage_for_Enhancing_Performance

    # create ~16GB metadata LV (metaDataSizeKib) that has a size which is
    # a multiple of RAID stripe width if it is > minimum vg size
    # otherwise allocate a minimum of 0.5% of the data device size
    # and create data LV (poolDataSize) that has a size which is
    # a multiple of stripe width.
    vgSizeKib = int(_getDeviceSize(vg, 'KiB'))
    if _getDeviceSize(vg) < MIN_VG_SIZE:
        metaDataSizeKib = vgSizeKib * MIN_METADATA_PERCENT
    poolDataSize = vgSizeKib - metaDataSizeKib

    metaDataSizeKib = (metaDataSizeKib - (metaDataSizeKib % alignment))
    poolDataSize = (poolDataSize - (poolDataSize % alignment))

    # Creating a thin pool from the data LV and the metadata LV
    # lvconvert --chunksize alignment --thinpool VOLGROUP/thin_pool
    #     --poolmetadata VOLGROUP/metadata_device_name
    pool = _createThinPool(poolName, vg, chunkSize, metaDataSizeKib,
                           poolDataSize)
    # Size of the thin LV should be same as the size of Thinpool to avoid
    # over allocation. Refer bz#1412455 for more info.
    if six.PY2:
        thinlv = LVMThinLogicalVolumeDevice(brickName,
                                            parents=[pool],
                                            size=blivet.size.Size(
                                                '%d KiB' % poolDataSize),
                                            grow=True)
    else:
        thinlv = LVMLogicalVolumeDevice(brickName,
                                        parents=[pool],
                                        size=blivet.size.Size('%d KiB' %
                                                              poolDataSize),
                                        grow=True,
                                        seg_type="thin")

    blivetEnv.createDevice(thinlv)
    blivetEnv.doIt()

    if fsType != DEFAULT_FS_TYPE:
        log.error("fstype %s is currently unsupported" % fsType)
        raise ge.GlusterHostStorageDeviceMkfsFailedException(fsType)

    if six.PY2:
        get_format = blivet.formats.getFormat  # pylint: disable=no-member
    else:
        get_format = blivet.formats.get_format  # pylint: disable=no-member

    format = get_format(DEFAULT_FS_TYPE,
                        device=thinlv.path,
                        mountopts=DEFAULT_MOUNT_OPTIONS)
    format._defaultFormatOptions = ["-f", "-i", "size=512", "-n", "size=8192"]
    if raidParams.get('type') == '6':
        format._defaultFormatOptions += [
            "-d", "sw=%s,su=%sk" % (count, raidParams.get('stripeSize'))
        ]
    blivetEnv.formatDevice(thinlv, format)
    blivetEnv.doIt()

    try:
        os.makedirs(mountPoint)
    except OSError as e:
        if errno.EEXIST != e.errno:
            errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename)
            raise ge.GlusterHostStorageDeviceMakeDirsFailedException(
                err=[errMsg])
    thinlv.format.setup(mountpoint=mountPoint)
    blivetEnv.doIt()

    # bz#1230495: lvm devices are invisible and appear only after vgscan
    # Workaround: Till the bz gets fixed, We use vgscan to refresh LVM devices
    rc, out, err = commands.execCmd([_vgscanCommandPath.cmd])
    if rc:
        raise ge.GlusterHostStorageDeviceVGScanFailedException(rc, out, err)
    fstab.FsTab().add(thinlv.path,
                      mountPoint,
                      DEFAULT_FS_TYPE,
                      mntOpts=[DEFAULT_MOUNT_OPTIONS])

    # If selinux is enabled, set correct selinux labels on the brick.
    if selinux.is_selinux_enabled():
        rc, out, err = commands.execCmd([
            _semanageCommandPath.cmd, 'fcontext', '-a', '-t',
            'glusterd_brick_t', mountPoint
        ])
        if rc:
            raise ge.GlusterHostFailedToSetSelinuxContext(
                mountPoint, rc, out, err)
        try:
            # mountPoint can be of 'unicode' type when it's passed through
            # jsonrpc. restorecon calls into a C API that needs a char *.
            # Thus, it is necessary to encode unicode to a utf-8 string.
            selinux.restorecon(mountPoint.encode('utf-8'), recursive=True)
        except OSError as e:
            errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename)
            raise ge.GlusterHostFailedToRunRestorecon(mountPoint, err=errMsg)
    return _getDeviceDict(thinlv)
Example 34
 def restorecon(self, path):
     selinux.restorecon(str(path), recursive=1)
Example 35
def createBrick(brickName, mountPoint, devNameList, fsType=DEFAULT_FS_TYPE,
                raidParams={}):
    def _getDeviceList(devNameList):
        return [blivetEnv.devicetree.getDeviceByName(devName.split("/")[-1])
                for devName in devNameList]

    def _createPV(deviceList, alignment):
        for dev in deviceList:
            # bz#1178705: Blivet always creates pv with 1MB dataalignment
            # Workaround: Till blivet fixes the issue, we use lvm pvcreate
            rc, out, err = commands.execCmd([_pvCreateCommandPath.cmd,
                                             '--dataalignment',
                                             '%sk' % alignment,
                                             dev.path])
            if rc:
                raise ge.GlusterHostStorageDevicePVCreateFailedException(
                    dev.path, alignment, rc, out, err)
        _reset_blivet(blivetEnv)
        return _getDeviceList([dev.name for dev in deviceList])

    def _createVG(vgName, deviceList, stripeSize):
        # bz#1198568: Blivet always creates vg with 1MB stripe size
        # Workaround: Till blivet fixes the issue, use vgcreate command
        devices = ','.join([device.path for device in deviceList])
        rc, out, err = commands.execCmd([_vgCreateCommandPath.cmd,
                                         '-s', '%sk' % stripeSize,
                                         vgName, devices])
        if rc:
            raise ge.GlusterHostStorageDeviceVGCreateFailedException(
                vgName, devices, stripeSize, rc, out, err)
        blivetEnv.reset()
        return blivetEnv.devicetree.getDeviceByName(vgName)

    def _createThinPool(poolName, vg, alignment,
                        poolMetaDataSize, poolDataSize):
        metaName = "meta-%s" % poolName
        vgPoolName = "%s/%s" % (vg.name, poolName)
        metaLv = LVMLogicalVolumeDevice(
            metaName, parents=[vg],
            size=blivet.size.Size('%d KiB' % poolMetaDataSize))
        poolLv = LVMLogicalVolumeDevice(
            poolName, parents=[vg],
            size=blivet.size.Size('%d KiB' % poolDataSize))
        blivetEnv.createDevice(metaLv)
        blivetEnv.createDevice(poolLv)
        blivetEnv.doIt()

        # bz#1100514: LVM2 currently only supports physical extent sizes
        # that are a power of 2. Till that support is available we need
        # to use lvconvert to achieve that.
        # bz#1179826: blivet doesn't support lvconvert functionality.
        # Workaround: Till the bz gets fixed, lvconvert command is used
        rc, out, err = commands.execCmd([_lvconvertCommandPath.cmd,
                                         '--chunksize', '%sK' % alignment,
                                         '--thinpool', vgPoolName,
                                         '--poolmetadata',
                                         "%s/%s" % (vg.name, metaName),
                                         '--poolmetadataspare', 'n', '-y'])

        if rc:
            raise ge.GlusterHostStorageDeviceLVConvertFailedException(
                vg.path, alignment, rc, out, err)
        rc, out, err = commands.execCmd([_lvchangeCommandPath.cmd,
                                         '--zero', 'n', vgPoolName])
        if rc:
            raise ge.GlusterHostStorageDeviceLVChangeFailedException(
                vgPoolName, rc, out, err)
        _reset_blivet(blivetEnv)
        return blivetEnv.devicetree.getDeviceByName(poolLv.name)

    if os.path.ismount(mountPoint):
        raise ge.GlusterHostStorageMountPointInUseException(mountPoint)

    vgName = "vg-" + brickName
    poolName = "pool-" + brickName
    poolDataSize = 0
    count = 0
    raidType = raidParams.get('type')
    metaDataSizeKib = DEFAULT_METADATA_SIZE_KB
    if raidType == '6':
        count = raidParams['pdCount'] - 2
        alignment = raidParams['stripeSize'] * count
        chunkSize = alignment
    elif raidType == '10':
        count = raidParams['pdCount'] / 2
        alignment = raidParams['stripeSize'] * count
        chunkSize = DEFAULT_CHUNK_SIZE_KB
    else:  # Device type is JBOD
        alignment = DEFAULT_CHUNK_SIZE_KB
        chunkSize = DEFAULT_CHUNK_SIZE_KB

    blivetEnv = blivet.Blivet()
    _reset_blivet(blivetEnv)

    # get the devices list from the device name
    deviceList = _getDeviceList(devNameList)

    # raise an error when any device not actually found in the given list
    notFoundList = set(devNameList).difference(
        set([dev.name for dev in deviceList]))
    if notFoundList:
        raise ge.GlusterHostStorageDeviceNotFoundException(notFoundList)

    # raise an error when any device is used already in the given list
    inUseList = set(devNameList).difference(set([not _canCreateBrick(
        dev) or dev.name for dev in deviceList]))
    if inUseList:
        raise ge.GlusterHostStorageDeviceInUseException(inUseList)

    pvDeviceList = _createPV(deviceList, alignment)
    vg = _createVG(vgName, pvDeviceList, alignment)
    # The following calculation is based on the redhat storage performance doc
    # http://docbuilder.usersys.redhat.com/22522
    # /#chap-Configuring_Red_Hat_Storage_for_Enhancing_Performance

    # create ~16GB metadata LV (metaDataSizeKib) that has a size which is
    # a multiple of RAID stripe width if it is > minimum vg size
    # otherwise allocate a minimum of 0.5% of the data device size
    # and create data LV (poolDataSize) that has a size which is
    # a multiple of stripe width.
    vgSizeKib = int(_getDeviceSize(vg, 'KiB'))
    if _getDeviceSize(vg) < MIN_VG_SIZE:
        metaDataSizeKib = vgSizeKib * MIN_METADATA_PERCENT
    poolDataSize = vgSizeKib - metaDataSizeKib

    metaDataSizeKib = (metaDataSizeKib - (metaDataSizeKib % alignment))
    poolDataSize = (poolDataSize - (poolDataSize % alignment))

    # Creating a thin pool from the data LV and the metadata LV
    # lvconvert --chunksize alignment --thinpool VOLGROUP/thin_pool
    #     --poolmetadata VOLGROUP/metadata_device_name
    pool = _createThinPool(poolName, vg, chunkSize, metaDataSizeKib,
                           poolDataSize)
    thinlv = LVMThinLogicalVolumeDevice(brickName, parents=[pool],
                                        size=vg.size, grow=True)
    blivetEnv.createDevice(thinlv)
    blivetEnv.doIt()

    if fsType != DEFAULT_FS_TYPE:
        log.error("fstype %s is currently unsupported" % fsType)
        raise ge.GlusterHostStorageDeviceMkfsFailedException(
            thinlv.path, alignment, raidParams.get('stripeSize', 0), fsType)

    format = blivet.formats.getFormat(DEFAULT_FS_TYPE, device=thinlv.path,
                                      mountopts=DEFAULT_MOUNT_OPTIONS)
    format._defaultFormatOptions = ["-f", "-i", "size=512", "-n", "size=8192"]
    if raidParams.get('type') == '6':
        format._defaultFormatOptions += ["-d", "sw=%s,su=%sk" % (
            count, raidParams.get('stripeSize'))]
    blivetEnv.formatDevice(thinlv, format)
    blivetEnv.doIt()

    try:
        os.makedirs(mountPoint)
    except OSError as e:
        if errno.EEXIST != e.errno:
            errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename)
            raise ge.GlusterHostStorageDeviceMakeDirsFailedException(
                err=[errMsg])
    thinlv.format.setup(mountpoint=mountPoint)
    blivetEnv.doIt()

    # bz#1230495: lvm devices are invisible and appear only after vgscan
    # Workaround: Till the bz gets fixed, We use vgscan to refresh LVM devices
    rc, out, err = commands.execCmd([_vgscanCommandPath.cmd])
    if rc:
        raise ge.GlusterHostStorageDeviceVGScanFailedException(rc, out, err)
    fstab.FsTab().add(thinlv.path, mountPoint,
                      DEFAULT_FS_TYPE, mntOpts=[DEFAULT_MOUNT_OPTIONS])

    # If selinux is enabled, set correct selinux labels on the brick.
    if selinux.is_selinux_enabled():
        rc, out, err = commands.execCmd([_semanageCommandPath.cmd,
                                         'fcontext', '-a', '-t',
                                         'glusterd_brick_t', mountPoint])
        if rc:
            raise ge.GlusterHostFailedToSetSelinuxContext(mountPoint, rc,
                                                          out, err)
        try:
            # mountPoint can be of 'unicode' type when it's passed through
            # jsonrpc. restorecon calls into a C API that needs a char *.
            # Thus, it is necessary to encode unicode to a utf-8 string.
            selinux.restorecon(mountPoint.encode('utf-8'), recursive=True)
        except OSError as e:
            errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename)
            raise ge.GlusterHostFailedToRunRestorecon(mountPoint, err=errMsg)
    return _getDeviceDict(thinlv)
Example 36
 def restore_context(self, mdict):
     selinux.restorecon(mdict['pki_instance_path'], True)
     selinux.restorecon(config.PKI_DEPLOYMENT_LOG_ROOT, True)
     selinux.restorecon(mdict['pki_instance_log_path'], True)
     selinux.restorecon(mdict['pki_instance_configuration_path'], True)