Example #1
    def __getResourceCandidatesList(self, resourceName, lockType):
        """
        Return list of lock candidates (template and volumes)
        """
        volResourcesList = []
        template = None
        dom = sdCache.produce(sdUUID=self.sdUUID)
        # Get the list of the volumes
        repoPath = os.path.join(self.storage_repository, dom.getPools()[0])
        try:
            chain = image.Image(repoPath).getChain(sdUUID=self.sdUUID, imgUUID=resourceName)
        except se.ImageDoesNotExistInSD:
            log.debug("Image %s does not exist in domain %s", resourceName, self.sdUUID)
            return []

        # Check whether the chain is built on top of a template or is standalone
        pvol = chain[0].getParentVolume()
        if pvol:
            template = pvol.volUUID
        elif chain[0].isShared():
            # Image of template itself,
            # with no other volumes in chain
            template = chain[0].volUUID
            del chain[:]

        volUUIDChain = [vol.volUUID for vol in chain]
        volUUIDChain.sort()

        # Activate all volumes in the chain at once.
        # Later steps in the flow may try to activate these volumes again;
        # that is harmless, since they are already active.
        # TODO Fix resource framework to hold images, instead of specific vols.
        # This assumes that chains can not spread into more than one SD.
        if dom.__class__.__name__ == "BlockStorageDomain":
            lvm.activateLVs(self.sdUUID, volUUIDChain)

        failed = False
        # Acquire template locks:
        # - 'lockType' for template's image itself
        # - Always 'shared' lock for image based on template
        try:
            if template:
                if len(volUUIDChain) > 0:
                    volRes = rmanager.acquireResource(self.volumeResourcesNamespace, template, rm.LockType.shared,
                                                      timeout=self.resource_default_timeout)
                else:
                    volRes = rmanager.acquireResource(self.volumeResourcesNamespace, template, lockType,
                                                      timeout=self.resource_default_timeout)
                volResourcesList.append(volRes)

            # Acquire 'lockType' volume locks
            for volUUID in volUUIDChain:
                volRes = rmanager.acquireResource(self.volumeResourcesNamespace, volUUID, lockType,
                                                    timeout=self.resource_default_timeout)

                volResourcesList.append(volRes)
        except (rm.RequestTimedOutError, se.ResourceAcqusitionFailed) as e:
            log.debug("Cannot acquire volume resource (%s)", str(e))
            failed = True
            raise
        except Exception:
            log.debug("Cannot acquire volume resource", exc_info=True)
            failed = True
            raise
        finally:
            if failed:
                # Release the already acquired template/volume locks
                for volRes in volResourcesList:
                    volRes.release()

        return volResourcesList
Example #2
    def __init__(self, sdUUID):
        domaindir = os.path.join(self.mountpoint, sdUUID)
        metadata = selectMetadata(sdUUID)
        sd.StorageDomain.__init__(self, sdUUID, domaindir, metadata)
        lvm.activateLVs(self.sdUUID, SPECIAL_LVS)
        self.metavol = lvm.lvPath(self.sdUUID, sd.METADATA)

        try:
            self.logBlkSize = self.getMetaParam(DMDK_LOGBLKSIZE)
            self.phyBlkSize = self.getMetaParam(DMDK_PHYBLKSIZE)
        except KeyError:
            # 512 by Saggi "Trust me (Smoch Alai (sic))"
            # The *blkSize keys may be missing from the metadata only for
            # domains that existed before the keys were introduced.
            # Such domains support only 512-byte block sizes.
            self.logBlkSize = 512
            self.phyBlkSize = 512

        # Check that all devices in the VG have the same logical and physical
        # block sizes.
        lvm.checkVGBlockSizes(sdUUID, (self.logBlkSize, self.phyBlkSize))

        # _extendlock is used to prevent race between
        # VG extend and LV extend.
        self._extendlock = threading.Lock()
        self.imageGarbageCollector()
        self._registerResourceNamespaces()
        self._lastUncachedSelftest = 0
Example #3
    def __init__(self, sdUUID):
        domaindir = os.path.join(self.mountpoint, sdUUID)
        metadata = selectMetadata(sdUUID)
        sd.StorageDomain.__init__(self, sdUUID, domaindir, metadata)
        lvm.activateLVs(self.sdUUID, SPECIAL_LVS)
        self.metavol = lvm.lvPath(self.sdUUID, sd.METADATA)

        try:
            self.logBlkSize = self.getMetaParam(DMDK_LOGBLKSIZE)
            self.phyBlkSize = self.getMetaParam(DMDK_PHYBLKSIZE)
        except KeyError:
            # Initialize the block sizes metadata if not defined
            self.logBlkSize, self.phyBlkSize = lvm.getVGBlockSizes(sdUUID)
            self.setMetaParam(DMDK_LOGBLKSIZE, self.logBlkSize)
            self.setMetaParam(DMDK_PHYBLKSIZE, self.phyBlkSize)

        # Check that all devices in the VG have the same logical and physical
        # block sizes.
        lvm.checkVGBlockSizes(sdUUID, (self.logBlkSize, self.phyBlkSize))

        # _extendlock is used to prevent race between
        # VG extend and LV extend.
        self._extendlock = threading.Lock()
        self.imageGarbageCollector()
        self._registerResourceNamespaces()
        self._lastUncachedSelftest = 0
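
The two __init__ variants above differ only in how a missing block-size key is handled: the first falls back to hard-coded 512-byte values, the second probes the VG and persists the result so the probe runs only once. A minimal standalone sketch of that read-or-initialize-and-persist pattern, using a plain dict as a stand-in for the domain metadata (all names here are illustrative, not the VDSM API):

    def get_block_sizes(metadata, probe):
        try:
            return metadata["LOGBLKSIZE"], metadata["PHYBLKSIZE"]
        except KeyError:
            # First access on an old domain: probe once, then persist.
            log_size, phy_size = probe()
            metadata["LOGBLKSIZE"] = log_size
            metadata["PHYBLKSIZE"] = phy_size
            return log_size, phy_size

    md = {}
    assert get_block_sizes(md, lambda: (512, 512)) == (512, 512)
    assert md["LOGBLKSIZE"] == 512  # persisted after the first probe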
Example #4
def _postZero(sdUUID, volumes):
    # It is assumed that no thread can deactivate these LVs on this host,
    # or change their rw permission on this or any other host.

    lvNames = tuple(vol.volUUID for vol in volumes)
    # Assert volumes are writable. (Don't do this at home.)
    try:
        lvm.changelv(sdUUID, lvNames, ("--permission", "rw"))
    except se.StorageException:
        # Hope this only means that some volumes were already writable.
        pass

    lvm.activateLVs(sdUUID, lvNames)

    for lv in lvm.getLV(sdUUID):
        if lv.name in lvNames:
            # wipe out the whole volume
            try:
                misc.ddWatchCopy(
                    "/dev/zero", lvm.lvPath(sdUUID, lv.name),
                    vars.task.aborting, int(lv.size),
                    recoveryCallback=volume.baseAsyncTasksRollback)
            except utils.ActionStopped:
                raise
            except Exception:
                raise se.VolumesZeroingError(lv.name)
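
_postZero streams zeroes over each LV, checking an abort callback while the copy runs. A rough standalone sketch of such an abortable zeroing loop, writing to an ordinary existing file instead of an LV device path (zero_out and its signature are hypothetical, not the ddWatchCopy API):

    def zero_out(path, size, aborting, chunk=1024 * 1024):
        # Write zeroes in chunks, checking the abort callback between chunks.
        with open(path, "r+b") as f:
            done = 0
            while done < size:
                if aborting():
                    raise RuntimeError("zeroing aborted")
                n = min(chunk, size - done)
                f.write(b"\0" * n)
                done += n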
Example #5
    def readlines(self):
        # Fetch the metadata from metadata volume
        lvm.activateLVs(self._vgName, self._lvName)

        m = misc.readblock(self.metavol, self._offset, self._size)
        # A read from the metadata volume returns the actual metadata
        # followed by a run of trailing zeroes. Strip them out.
        metadata = [i for i in m if len(i) > 0 and i[0] != '\x00' and "=" in i]

        return metadata
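
readlines() relies on the metadata occupying the start of a fixed-size block, with the remainder reading back as NUL padding. A standalone sketch of the stripping step (strip_metadata_padding is a hypothetical helper, not part of VDSM):

    def strip_metadata_padding(lines):
        # Keep only non-empty "key=value" lines; drop NUL padding.
        return [l for l in lines if len(l) > 0 and l[0] != '\x00' and "=" in l]

    raw = ["MDT_VERSION=3", "\x00\x00\x00", "", "LEASETIMESEC=60"]
    assert strip_metadata_padding(raw) == ["MDT_VERSION=3", "LEASETIMESEC=60"]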
Example #6
    def mountMaster(self):
        """
        Mount the master metadata file system. Should be called only by SPM.
        """
        lvm.activateLVs(self.sdUUID, MASTERLV)
        masterDir = os.path.join(self.domaindir, sd.MASTER_FS_DIR)
        fileUtils.createdir(masterDir)

        masterfsdev = lvm.lvPath(self.sdUUID, MASTERLV)
        cmd = [constants.EXT_FSCK, "-p", masterfsdev]
        (rc, out, err) = misc.execCmd(cmd)
        # fsck exit codes
        # 0    - No errors
        # 1    - File system errors corrected
        # 2    - File system errors corrected, system should
        #        be rebooted
        # 4    - File system errors left uncorrected
        # 8    - Operational error
        # 16   - Usage or syntax error
        # 32   - E2fsck canceled by user request
        # 128  - Shared library error
        if rc == 1 or rc == 2:
            # rc is a number
            self.log.info("fsck corrected fs errors (%s)", rc)
        if rc >= 4:
            raise se.BlockStorageDomainMasterFSCKError(masterfsdev, rc)

        # TODO: Remove when upgrade is only from a version which creates ext3
        # Try to add a journal - due to unfortunate circumstances we exposed
        # to the public the code that created ext2 file system instead of ext3.
        # In order to make up for it we are trying to add journal here, just
        # to be sure (and we have fixed the file system creation).
        # If there is a journal already tune2fs will do nothing, indicating this
        # condition only with exit code. However, we do not really care.
        cmd = [constants.EXT_TUNE2FS, "-j", masterfsdev]
        misc.execCmd(cmd)

        rc = fileUtils.mount(masterfsdev, masterDir, mountType=fileUtils.FSTYPE_EXT3)
        # mount exit codes
        # mount has the following return codes (the bits can be ORed):
        # 0      success
        # 1      incorrect invocation or permissions
        # 2      system error (out of memory, cannot fork, no more loop devices)
        # 4      internal mount bug or missing nfs support in mount
        # 8      user interrupt
        # 16     problems writing or locking /etc/mtab
        # 32     mount failure
        # 64     some mount succeeded
        if rc != 0:
            raise se.BlockStorageDomainMasterMountError(masterfsdev, rc, out)

        cmd = [constants.EXT_CHOWN, "%s:%s" % (constants.METADATA_USER, constants.METADATA_GROUP), masterDir]
        (rc, out, err) = misc.execCmd(cmd)
        if rc != 0:
            self.log.error("failed to chown %s", masterDir)
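
fsck's exit status is the sum (bitwise OR) of the condition codes listed in the comment, which is why the code treats 1 and 2 as success-with-repairs and anything from 4 upward as fatal. A small standalone sketch decoding the mask (decode_fsck_rc is illustrative only):

    FSCK_CODES = {
        1: "file system errors corrected",
        2: "file system errors corrected, reboot required",
        4: "file system errors left uncorrected",
        8: "operational error",
        16: "usage or syntax error",
        32: "canceled by user request",
        128: "shared library error",
    }

    def decode_fsck_rc(rc):
        # Collect the message for every bit set in rc.
        return [msg for bit, msg in sorted(FSCK_CODES.items()) if rc & bit] \
            or ["no errors"]

    assert decode_fsck_rc(0) == ["no errors"]
    assert decode_fsck_rc(6) == ["file system errors corrected, reboot required",
                                 "file system errors left uncorrected"]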
Example #7
    def mountMaster(self):
        """
        Mount the master metadata file system. Should be called only by SPM.
        """
        lvm.activateLVs(self.sdUUID, MASTERLV)
        masterDir = os.path.join(self.domaindir, sd.MASTER_FS_DIR)
        fileUtils.createdir(masterDir)

        masterfsdev = lvm.lvPath(self.sdUUID, MASTERLV)
        cmd = [constants.EXT_FSCK, "-p", masterfsdev]
        (rc, out, err) = misc.execCmd(cmd,
                                      sudo=True,
                                      deathSignal=signal.SIGKILL)
        # fsck exit codes
        # 0    - No errors
        # 1    - File system errors corrected
        # 2    - File system errors corrected, system should
        #        be rebooted
        # 4    - File system errors left uncorrected
        # 8    - Operational error
        # 16   - Usage or syntax error
        # 32   - E2fsck canceled by user request
        # 128  - Shared library error
        if rc == 1 or rc == 2:
            # rc is a number
            self.log.info("fsck corrected fs errors (%s)", rc)
        if rc >= 4:
            raise se.BlockStorageDomainMasterFSCKError(masterfsdev, rc)

        # TODO: Remove when upgrade is only from a version which creates ext3
        # Try to add a journal - due to unfortunate circumstances we exposed
        # to the public the code that created ext2 file system instead of ext3.
        # In order to make up for it we are trying to add journal here, just
        # to be sure (and we have fixed the file system creation).
        # If there is a journal already tune2fs will do nothing, indicating
        # this condition only with exit code. However, we do not really care.
        cmd = [constants.EXT_TUNE2FS, "-j", masterfsdev]
        misc.execCmd(cmd, sudo=True, deathSignal=signal.SIGKILL)

        masterMount = mount.Mount(masterfsdev, masterDir)

        try:
            masterMount.mount(vfstype=mount.VFS_EXT3)
        except mount.MountError as ex:
            rc, out = ex
            raise se.BlockStorageDomainMasterMountError(masterfsdev, rc, out)

        cmd = [
            constants.EXT_CHOWN,
            "%s:%s" % (constants.METADATA_USER, constants.METADATA_GROUP),
            masterDir
        ]
        (rc, out, err) = misc.execCmd(cmd, sudo=True)
        if rc != 0:
            self.log.error("failed to chown %s", masterDir)
Example #8
    def activateVolumes(self, imgUUID, volUUIDs):
        """
        Activate all the volumes belonging to the image.

        imgUUID: the image whose volumes should be activated.
        volUUIDs: the list of volume UUIDs to activate.

        If the image is based on a template image it will be activated.
        """
        lvm.activateLVs(self.sdUUID, volUUIDs)
        vgDir = os.path.join("/dev", self.sdUUID)
        return self.createImageLinks(vgDir, imgUUID, volUUIDs)
Example #9
    def mountMaster(self):
        """
        Mount the master metadata file system. Should be called only by SPM.
        """
        lvm.activateLVs(self.sdUUID, MASTERLV)
        masterDir = os.path.join(self.domaindir, sd.MASTER_FS_DIR)
        fileUtils.createdir(masterDir)

        masterfsdev = lvm.lvPath(self.sdUUID, MASTERLV)
        cmd = [constants.EXT_FSCK, "-p", masterfsdev]
        (rc, out, err) = misc.execCmd(cmd, sudo=True,
                                      deathSignal=signal.SIGKILL)
        # fsck exit codes
        # 0    - No errors
        # 1    - File system errors corrected
        # 2    - File system errors corrected, system should
        #        be rebooted
        # 4    - File system errors left uncorrected
        # 8    - Operational error
        # 16   - Usage or syntax error
        # 32   - E2fsck canceled by user request
        # 128  - Shared library error
        if rc == 1 or rc == 2:
            # rc is a number
            self.log.info("fsck corrected fs errors (%s)", rc)
        if rc >= 4:
            raise se.BlockStorageDomainMasterFSCKError(masterfsdev, rc)

        # TODO: Remove when upgrade is only from a version which creates ext3
        # Try to add a journal - due to unfortunate circumstances we exposed
        # to the public the code that created ext2 file system instead of ext3.
        # In order to make up for it we are trying to add journal here, just
        # to be sure (and we have fixed the file system creation).
        # If there is a journal already tune2fs will do nothing, indicating
        # this condition only with exit code. However, we do not really care.
        cmd = [constants.EXT_TUNE2FS, "-j", masterfsdev]
        misc.execCmd(cmd, sudo=True, deathSignal=signal.SIGKILL)

        masterMount = mount.Mount(masterfsdev, masterDir)

        try:
            masterMount.mount(vfstype=mount.VFS_EXT3)
        except mount.MountError as ex:
            rc, out = ex
            raise se.BlockStorageDomainMasterMountError(masterfsdev, rc, out)

        cmd = [constants.EXT_CHOWN, "%s:%s" %
               (constants.METADATA_USER, constants.METADATA_GROUP), masterDir]
        (rc, out, err) = misc.execCmd(cmd, sudo=True)
        if rc != 0:
            self.log.error("failed to chown %s", masterDir)
Example #10
    def refreshDirTree(self):
        # create domain images folder
        imagesPath = os.path.join(self.domaindir, sd.DOMAIN_IMAGES)
        fileUtils.createdir(imagesPath)

        # create domain special volumes folder
        domMD = os.path.join(self.domaindir, sd.DOMAIN_META_DATA)
        fileUtils.createdir(domMD)

        lvm.activateLVs(self.sdUUID, SPECIAL_LVS)
        for lvName in SPECIAL_LVS:
            dst = os.path.join(domMD, lvName)
            if not os.path.lexists(dst):
                src = lvm.lvPath(self.sdUUID, lvName)
                os.symlink(src, dst)
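
Note the os.path.lexists() check above: unlike os.path.exists(), it is also true for a dangling symlink, so a stale link is left alone instead of triggering an EEXIST error from os.symlink(). The same check in isolation (ensure_symlink is a hypothetical helper):

    import os

    def ensure_symlink(src, dst):
        # lexists() is true even for a broken link; exists() would follow
        # the link and report False, and os.symlink() would then fail.
        if not os.path.lexists(dst):
            os.symlink(src, dst)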
Example #11
    def writelines(self, lines):
        lvm.activateLVs(self._vgName, self._lvName)

        # Write the metadata lines to the metadata volume
        metaStr = StringIO()

        for line in lines:
            metaStr.write(line)
            metaStr.write("\n")

        if metaStr.pos > self._size:
            raise se.MetadataOverflowError()

        # Clear out previous data - it is a volume, not a file
        metaStr.write('\0' * (self._size - metaStr.pos))

        data = metaStr.getvalue()
        with fileUtils.DirectFile(self.metavol, "r+d") as f:
            f.seek(self._offset)
            f.write(data)
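
Because the target is a raw volume rather than a file, writelines() must overwrite the whole fixed-size slot: a shorter write would leave the tail of the previous metadata in place. A standalone sketch of the serialize-check-pad step (serialize_metadata is illustrative, not the VDSM API):

    def serialize_metadata(lines, size):
        data = "".join(line + "\n" for line in lines)
        if len(data) > size:
            raise ValueError("metadata overflow: %d > %d" % (len(data), size))
        # Pad with NULs so stale bytes from a previous, longer write are wiped.
        return data + "\0" * (size - len(data))

    blob = serialize_metadata(["MDT_VERSION=3"], 512)
    assert len(blob) == 512 and blob.startswith("MDT_VERSION=3\n")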
Example #12
    def __getResourceCandidatesList(self, resourceName, lockType):
        """
        Return list of lock candidates (template and volumes)
        """
        volResourcesList = []
        template = None
        dom = sdCache.produce(sdUUID=self.sdUUID)
        # Get the list of the volumes
        repoPath = os.path.join(self.storage_repository, dom.getPools()[0])
        try:
            chain = image.Image(repoPath).getChain(sdUUID=self.sdUUID,
                                                   imgUUID=resourceName)
        except se.ImageDoesNotExistInSD:
            log.debug("Image %s does not exist in domain %s", resourceName,
                      self.sdUUID)
            return []

        # Check whether the chain is built on top of a template or is standalone
        pvol = chain[0].getParentVolume()
        if pvol:
            template = pvol.volUUID
        elif chain[0].isShared():
            # Image of template itself,
            # with no other volumes in chain
            template = chain[0].volUUID
            del chain[:]

        volUUIDChain = [vol.volUUID for vol in chain]
        volUUIDChain.sort()

        # Activate all volumes in the chain at once.
        # Later steps in the flow may try to activate these volumes again;
        # that is harmless, since they are already active.
        # TODO Fix resource framework to hold images, instead of specific vols.
        # This assumes that chains can not spread into more than one SD.
        if dom.__class__.__name__ == "BlockStorageDomain":
            lvm.activateLVs(self.sdUUID, volUUIDChain)

        failed = False
        # Acquire template locks:
        # - 'lockType' for template's image itself
        # - Always 'shared' lock for image based on template
        try:
            if template:
                if len(volUUIDChain) > 0:
                    volRes = rmanager.acquireResource(
                        self.volumeResourcesNamespace,
                        template,
                        rm.LockType.shared,
                        timeout=self.resource_default_timeout)
                else:
                    volRes = rmanager.acquireResource(
                        self.volumeResourcesNamespace,
                        template,
                        lockType,
                        timeout=self.resource_default_timeout)
                volResourcesList.append(volRes)

            # Acquire 'lockType' volume locks
            for volUUID in volUUIDChain:
                volRes = rmanager.acquireResource(
                    self.volumeResourcesNamespace,
                    volUUID,
                    lockType,
                    timeout=self.resource_default_timeout)

                volResourcesList.append(volRes)
        except (rm.RequestTimedOutError, se.ResourceAcqusitionFailed) as e:
            log.debug("Cannot acquire volume resource (%s)", str(e))
            failed = True
            raise
        except Exception:
            log.debug("Cannot acquire volume resource", exc_info=True)
            failed = True
            raise
        finally:
            if failed:
                # Release the already acquired template/volume locks
                for volRes in volResourcesList:
                    volRes.release()

        return volResourcesList
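
The failed flag plus the finally block implement an all-or-nothing acquire: on any failure, every lock taken so far is released before the exception propagates. A minimal standalone sketch of the same rollback pattern with stand-in lock objects (FakeLock and acquire_all are illustrative, not the VDSM resource manager):

    class FakeLock(object):
        def __init__(self, name):
            self.name = name
            self.held = False

        def acquire(self):
            self.held = True

        def release(self):
            self.held = False

    def acquire_all(locks):
        acquired = []
        try:
            for lock in locks:
                lock.acquire()
                acquired.append(lock)
        except Exception:
            # Roll back: release everything acquired before the failure.
            for lock in acquired:
                lock.release()
            raise
        return acquired

    held = acquire_all([FakeLock("template"), FakeLock("vol-1")])
    assert all(lock.held for lock in held)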
Example #13
    def activateVolumes(self, imgUUID, volUUIDs):
        """
        Activate all the volumes listed in volUUIDs
        """
        lvm.activateLVs(self.sdUUID, volUUIDs)
Example #14
    def __init__(self, vg, lv, lockType):
        self._vg = vg
        self._lv = lv

        lvm.activateLVs(self._vg, self._lv)
Example #15
    def getIdsFilePath(self):
        lvm.activateLVs(self.sdUUID, [sd.IDS])
        return lvm.lvPath(self.sdUUID, sd.IDS)
Example #16
    def __init__(self, vg, lv, lockType):
        self._vg = vg
        self._lv = lv

        lvm.activateLVs(self._vg, [self._lv])
Example #17
    def activateVolumes(self, volUUIDs):
        """
        Activate all the volumes listed in volUUIDs
        """
        lvm.activateLVs(self.sdUUID, volUUIDs)
Example #18
    def getLeasesFilePath(self):
        lvm.activateLVs(self.sdUUID, [sd.LEASES])
        return lvm.lvPath(self.sdUUID, sd.LEASES)
Example #19
    def mountMaster(self):
        """
        Mount the master metadata file system. Should be called only by SPM.
        """
        lvm.activateLVs(self.sdUUID, MASTERLV)
        masterDir = os.path.join(self.domaindir, sd.MASTER_FS_DIR)
        fileUtils.createdir(masterDir)

        masterfsdev = lvm.lvPath(self.sdUUID, MASTERLV)
        cmd = [constants.EXT_FSCK, "-p", masterfsdev]
        (rc, out, err) = misc.execCmd(cmd)
        # fsck exit codes
        # 0    - No errors
        # 1    - File system errors corrected
        # 2    - File system errors corrected, system should
        #        be rebooted
        # 4    - File system errors left uncorrected
        # 8    - Operational error
        # 16   - Usage or syntax error
        # 32   - E2fsck canceled by user request
        # 128  - Shared library error
        if rc == 1 or rc == 2:
            # rc is a number
            self.log.info("fsck corrected fs errors (%s)", rc)
        if rc >= 4:
            raise se.BlockStorageDomainMasterFSCKError(masterfsdev, rc)

        # TODO: Remove when upgrade is only from a version which creates ext3
        # Try to add a journal - due to unfortunate circumstances we exposed
        # to the public the code that created ext2 file system instead of ext3.
        # In order to make up for it we are trying to add journal here, just
        # to be sure (and we have fixed the file system creation).
        # If there is a journal already tune2fs will do nothing, indicating this
        # condition only with exit code. However, we do not really care.
        cmd = [constants.EXT_TUNE2FS, "-j", masterfsdev]
        misc.execCmd(cmd)

        rc = fileUtils.mount(masterfsdev,
                             masterDir,
                             mountType=fileUtils.FSTYPE_EXT3)
        # mount exit codes
        # mount has the following return codes (the bits can be ORed):
        # 0      success
        # 1      incorrect invocation or permissions
        # 2      system error (out of memory, cannot fork, no more loop devices)
        # 4      internal mount bug or missing nfs support in mount
        # 8      user interrupt
        # 16     problems writing or locking /etc/mtab
        # 32     mount failure
        # 64     some mount succeeded
        if rc != 0:
            raise se.BlockStorageDomainMasterMountError(masterfsdev, rc, out)

        cmd = [
            constants.EXT_CHOWN,
            "%s:%s" % (constants.METADATA_USER, constants.METADATA_GROUP),
            masterDir
        ]
        (rc, out, err) = misc.execCmd(cmd)
        if rc != 0:
            self.log.error("failed to chown %s", masterDir)
Example #20
    def __init__(self, vg, lv, lockType):
        self._vg = vg
        self._lv = lv

        lvm.activateLVs(self._vg, self._lv)
        self.switchLockType(lockType)