def createImageLinks(self, srcImgPath, imgUUID, volUUIDs):
    """
    Build a per-image directory of symlinks under /var/run/vdsm.

    The qcow chain is discovered by reading each qcow header for the
    path of its parent; layers are created with relative parent paths,
    so a directory holding links to every volume of the chain works
    from any location.  One is created here as
    <P_VDSM_STORAGE>/<sdUUID>/<imgUUID>, with one link per volume.

    srcImgPath: directory containing the image volumes.

    Returns the path of the link directory.
    """
    imgRunDir = os.path.join(constants.P_VDSM_STORAGE, self.sdUUID,
                             imgUUID)
    fileUtils.createdir(imgRunDir)
    for volUUID in volUUIDs:
        target = os.path.join(srcImgPath, volUUID)
        link = os.path.join(imgRunDir, volUUID)
        self.log.debug("Creating symlink from %s to %s", target, link)
        try:
            os.symlink(target, link)
        except OSError as err:
            # An existing link is fine (idempotent); anything else is
            # a real failure.
            if err.errno != errno.EEXIST:
                self.log.error("Failed to create img run vol: %s", link)
                raise
            self.log.debug("img run vol already exists: %s", link)
    return imgRunDir
def connect(self):
    """
    Mount the remote storage at the local mount point.

    No-op if already mounted.  On MountError the mount point directory
    created here is removed (best effort) and the original error is
    re-raised with its traceback.  If the mount succeeds but the mount
    point is not accessible, we disconnect (best effort) and re-raise
    the access error.
    """
    if self._mount.isMounted():
        return

    fileUtils.createdir(self._getLocalPath())

    try:
        self._mount.mount(self.options, self._vfsType)
    except MountError:
        t, v, tb = sys.exc_info()
        try:
            os.rmdir(self._getLocalPath())
        except OSError as e:
            self.log.warn("Error removing mountpoint directory %r: %s",
                          self._getLocalPath(), e)
        # BUG FIX: "raise t, v, tb" is Python-2-only syntax (a syntax
        # error on Python 3).  Use six.reraise, consistent with the
        # other connect() implementation in this file.
        six.reraise(t, v, tb)
    else:
        try:
            fileSD.validateDirAccess(
                self.getMountObj().getRecord().fs_file)
        except se.StorageServerAccessPermissionError:
            t, v, tb = sys.exc_info()
            try:
                self.disconnect()
            except OSError:
                self.log.exception("Error disconnecting")
            six.reraise(t, v, tb)
def connect(self):
    """
    Validate and mount the storage server unless already mounted.

    On mount failure the freshly created mount point directory is
    removed (best effort) and the original MountError is re-raised with
    its traceback.  After a successful mount, the mount point must be
    accessible; otherwise we disconnect (best effort) and re-raise the
    access error.
    """
    if self._mount.isMounted():
        return

    self.validate()

    fileUtils.createdir(self._getLocalPath())

    try:
        self._mount.mount(self.options, self._vfsType, cgroup=self.CGROUP)
    except MountError:
        exc_type, exc_value, exc_tb = sys.exc_info()
        try:
            os.rmdir(self._getLocalPath())
        except OSError as rm_err:
            self.log.warn("Error removing mountpoint directory %r: %s",
                          self._getLocalPath(), rm_err)
        six.reraise(exc_type, exc_value, exc_tb)

    # Reached only when the mount succeeded; the except branch above
    # never falls through (six.reraise always raises).
    mount_point = self.getMountObj().getRecord().fs_file
    try:
        fileSD.validateDirAccess(mount_point)
    except se.StorageServerAccessPermissionError:
        exc_type, exc_value, exc_tb = sys.exc_info()
        try:
            self.disconnect()
        except OSError:
            self.log.exception("Error disconnecting")
        six.reraise(exc_type, exc_value, exc_tb)
def __connectLocalConnection(self, conParams):
    """
    Connect to a storage low level entity.

    For each connection parameter dict, creates a symlink under the
    repository mount-point directory pointing at the connection's real
    path ('rp'), collecting a per-connection return code.

    NOTE(review): this chunk appears truncated -- conStatus is created
    but the visible code never appends to it or returns it; confirm
    against the full file.
    """
    conStatus = []
    localPath = os.path.join(self.storage_repository, sd.DOMAIN_MNT_POINT)
    fileUtils.createdir(localPath)
    for con in conParams:
        rc = 0
        try:
            if os.path.exists(con['rp']):
                lnPoint = fileUtils.transformPath(con['rp'])
                lnPath = os.path.join(localPath, lnPoint)
                # lexists: do not follow the link -- an existing (even
                # dangling) link must not be re-created.
                if not os.path.lexists(lnPath):
                    os.symlink(con['rp'], lnPath)
            else:
                self.log.error("Path %s does not exists.", con['rp'])
                rc = se.StorageServerConnectionError.code
        # BUG FIX: "except X, ex" is Python-2-only syntax; "as ex" is
        # valid on Python 2.6+ and Python 3 and matches the rest of
        # the file.
        except se.StorageException as ex:
            rc = ex.code
            self.log.error("Error during storage connection: %s",
                           str(ex), exc_info=True)
        except Exception as ex:
            rc = se.StorageServerConnectionError.code
            self.log.error("Error during storage connection: %s",
                           str(ex), exc_info=True)
def createImageLinks(self, srcImgPath, imgUUID, volUUIDs):
    """
    Create <P_VDSM_STORAGE>/<sdUUID>/<imgUUID> and populate it with a
    symlink for every volume of the image chain.

    The qcow chain is resolved by reading each qcow header for its
    parent path; because layers store relative parent paths, a
    directory of links to all chain volumes is usable from anywhere.

    srcImgPath: directory holding the image volumes.

    Returns the path of the link directory.
    """
    imgRunDir = os.path.join(constants.P_VDSM_STORAGE, self.sdUUID,
                             imgUUID)
    fileUtils.createdir(imgRunDir)
    for volUUID in volUUIDs:
        dstVol = os.path.join(imgRunDir, volUUID)
        try:
            os.symlink(os.path.join(srcImgPath, volUUID), dstVol)
        except OSError as e:
            # Tolerate a pre-existing link (idempotent); re-raise any
            # other failure.
            if e.errno == errno.EEXIST:
                self.log.debug("img run vol already exists: %s", dstVol)
            else:
                self.log.error("Failed to create img run vol: %s", dstVol)
                raise
    return imgRunDir
def connect(self):
    """
    Mount the remote storage if it is not already mounted.

    On MountError the mount point directory created here is removed
    (best effort) and the error re-raised.  After a successful mount,
    the mount point must be accessible; otherwise we disconnect (best
    effort) and re-raise the access error.
    """
    if self._mount.isMounted():
        return

    fileUtils.createdir(self._getLocalPath())

    try:
        self._mount.mount(self.options, self._vfsType)
    except MountError as e:
        self.log.error("Mount failed: %s", e, exc_info=True)
        try:
            os.rmdir(self._getLocalPath())
        except OSError:
            self.log.warn("Failed to remove mount point directory: %s",
                          self._getLocalPath(), exc_info=True)
        raise e
    else:
        try:
            fileSD.validateDirAccess(
                self.getMountObj().getRecord().fs_file)
        except se.StorageServerAccessPermissionError as e:
            try:
                self.disconnect()
            except OSError:
                # BUG FIX: the adjacent string literals lacked a
                # separating space, logging "...after accessproblem".
                self.log.warn("Error while disconnecting after access "
                              "problem", exc_info=True)
            raise e
def connect(self):
    """
    Mount the remote storage if it is not already mounted.

    On MountError the mount point directory created here is removed
    (best effort) and the error re-raised.  After a successful mount,
    the mount point must be accessible; otherwise we disconnect (best
    effort) and re-raise the access error.
    """
    if self._mount.isMounted():
        return

    fileUtils.createdir(self._getLocalPath())

    try:
        self._mount.mount(self.options, self._vfsType)
    except MountError as e:
        self.log.error("Mount failed: %s", e, exc_info=True)
        try:
            os.rmdir(self._getLocalPath())
        except OSError:
            self.log.warn("Failed to remove mount point directory: %s",
                          self._getLocalPath(), exc_info=True)
        raise e
    else:
        try:
            fileSD.validateDirAccess(
                self.getMountObj().getRecord().fs_file)
        except se.StorageServerAccessPermissionError as e:
            try:
                self.disconnect()
            except OSError:
                # BUG FIX: the adjacent string literals lacked a
                # separating space, logging "...after accessproblem".
                self.log.warn(
                    "Error while disconnecting after access "
                    "problem", exc_info=True)
            raise e
def mountMaster(self):
    """
    Mount the master metadata file system. Should be called only by SPM.

    Sequence: activate the master LV, fsck it, best-effort add an ext3
    journal (upgrade from mistakenly-created ext2), mount it as ext3,
    then chown the mount point to the metadata user/group.

    Raises:
        se.BlockStorageDomainMasterFSCKError: fsck left errors
            uncorrected (rc >= 4).
        se.BlockStorageDomainMasterMountError: the mount failed.
    """
    lvm.activateLVs(self.sdUUID, MASTERLV)
    masterDir = os.path.join(self.domaindir, sd.MASTER_FS_DIR)
    fileUtils.createdir(masterDir)

    masterfsdev = lvm.lvPath(self.sdUUID, MASTERLV)
    # -p: preen mode, fix what is safely fixable without asking.
    cmd = [constants.EXT_FSCK, "-p", masterfsdev]
    (rc, out, err) = misc.execCmd(cmd)
    # fsck exit codes
    # 0    - No errors
    # 1    - File system errors corrected
    # 2    - File system errors corrected, system should
    #        be rebooted
    # 4    - File system errors left uncorrected
    # 8    - Operational error
    # 16   - Usage or syntax error
    # 32   - E2fsck canceled by user request
    # 128  - Shared library error
    if rc == 1 or rc == 2:
        # rc is a number
        self.log.info("fsck corrected fs errors (%s)", rc)
    if rc >= 4:
        raise se.BlockStorageDomainMasterFSCKError(masterfsdev, rc)

    # TODO: Remove when upgrade is only from a version which creates ext3
    # Try to add a journal - due to unfortunate circumstances we exposed
    # to the public the code that created ext2 file system instead of ext3.
    # In order to make up for it we are trying to add journal here, just
    # to be sure (and we have fixed the file system creation).
    # If there is a journal already tune2fs will do nothing, indicating this
    # condition only with exit code. However, we do not really care.
    cmd = [constants.EXT_TUNE2FS, "-j", masterfsdev]
    misc.execCmd(cmd)

    rc = fileUtils.mount(masterfsdev, masterDir,
                         mountType=fileUtils.FSTYPE_EXT3)
    # mount exit codes
    # mount has the following return codes (the bits can be ORed):
    # 0      success
    # 1      incorrect invocation or permissions
    # 2      system error (out of memory, cannot fork, no more loop devices)
    # 4      internal mount bug or missing nfs support in mount
    # 8      user interrupt
    # 16     problems writing or locking /etc/mtab
    # 32     mount failure
    # 64     some mount succeeded
    if rc != 0:
        # NOTE(review): 'out' here is still the fsck output captured
        # above -- fileUtils.mount() appears to return only a code --
        # so the raised error carries stale output.  TODO confirm.
        raise se.BlockStorageDomainMasterMountError(masterfsdev, rc, out)

    cmd = [constants.EXT_CHOWN, "%s:%s" %
           (constants.METADATA_USER, constants.METADATA_GROUP), masterDir]
    (rc, out, err) = misc.execCmd(cmd)
    if rc != 0:
        # Non-fatal: log and continue with the mount in place.
        self.log.error("failed to chown %s", masterDir)
def mountMaster(self):
    """
    Mount the master metadata file system. Should be called only by SPM.

    Sequence: activate the master LV, fsck it, best-effort add an ext3
    journal (upgrade from mistakenly-created ext2), mount it as ext3,
    then chown the mount point to the metadata user/group.

    Raises:
        se.BlockStorageDomainMasterFSCKError: fsck left errors
            uncorrected (rc >= 4).
        se.BlockStorageDomainMasterMountError: the mount failed.
    """
    lvm.activateLVs(self.sdUUID, MASTERLV)
    masterDir = os.path.join(self.domaindir, sd.MASTER_FS_DIR)
    fileUtils.createdir(masterDir)

    masterfsdev = lvm.lvPath(self.sdUUID, MASTERLV)
    # -p: preen mode, fix what is safely fixable without asking.
    cmd = [constants.EXT_FSCK, "-p", masterfsdev]
    (rc, out, err) = misc.execCmd(cmd, sudo=True,
                                  deathSignal=signal.SIGKILL)
    # fsck exit codes
    # 0    - No errors
    # 1    - File system errors corrected
    # 2    - File system errors corrected, system should
    #        be rebooted
    # 4    - File system errors left uncorrected
    # 8    - Operational error
    # 16   - Usage or syntax error
    # 32   - E2fsck canceled by user request
    # 128  - Shared library error
    if rc == 1 or rc == 2:
        # rc is a number
        self.log.info("fsck corrected fs errors (%s)", rc)
    if rc >= 4:
        raise se.BlockStorageDomainMasterFSCKError(masterfsdev, rc)

    # TODO: Remove when upgrade is only from a version which creates ext3
    # Try to add a journal - due to unfortunate circumstances we exposed
    # to the public the code that created ext2 file system instead of ext3.
    # In order to make up for it we are trying to add journal here, just
    # to be sure (and we have fixed the file system creation).
    # If there is a journal already tune2fs will do nothing, indicating
    # this condition only with exit code. However, we do not really care.
    cmd = [constants.EXT_TUNE2FS, "-j", masterfsdev]
    misc.execCmd(cmd, sudo=True, deathSignal=signal.SIGKILL)

    masterMount = mount.Mount(masterfsdev, masterDir)
    try:
        masterMount.mount(vfstype=mount.VFS_EXT3)
    except mount.MountError as ex:
        # BUG FIX: "rc, out = ex" relied on Python 2 exceptions being
        # indexable; unpacking the args tuple explicitly behaves the
        # same on Python 2 and works on Python 3.
        rc, out = ex.args
        raise se.BlockStorageDomainMasterMountError(masterfsdev, rc, out)

    cmd = [
        constants.EXT_CHOWN,
        "%s:%s" % (constants.METADATA_USER, constants.METADATA_GROUP),
        masterDir
    ]
    (rc, out, err) = misc.execCmd(cmd, sudo=True)
    if rc != 0:
        # Non-fatal: log and continue with the mount in place.
        self.log.error("failed to chown %s", masterDir)
def mountMaster(self):
    """
    Mount the master metadata file system. Should be called only by SPM.

    Sequence: activate the master LV, fsck it, best-effort add an ext3
    journal (upgrade from mistakenly-created ext2), mount it as ext3,
    then chown the mount point to the metadata user/group.

    Raises:
        se.BlockStorageDomainMasterFSCKError: fsck left errors
            uncorrected (rc >= 4).
        se.BlockStorageDomainMasterMountError: the mount failed.
    """
    lvm.activateLVs(self.sdUUID, MASTERLV)
    masterDir = os.path.join(self.domaindir, sd.MASTER_FS_DIR)
    fileUtils.createdir(masterDir)

    masterfsdev = lvm.lvPath(self.sdUUID, MASTERLV)
    # -p: preen mode, fix what is safely fixable without asking.
    cmd = [constants.EXT_FSCK, "-p", masterfsdev]
    (rc, out, err) = misc.execCmd(cmd, sudo=True,
                                  deathSignal=signal.SIGKILL)
    # fsck exit codes
    # 0    - No errors
    # 1    - File system errors corrected
    # 2    - File system errors corrected, system should
    #        be rebooted
    # 4    - File system errors left uncorrected
    # 8    - Operational error
    # 16   - Usage or syntax error
    # 32   - E2fsck canceled by user request
    # 128  - Shared library error
    if rc == 1 or rc == 2:
        # rc is a number
        self.log.info("fsck corrected fs errors (%s)", rc)
    if rc >= 4:
        raise se.BlockStorageDomainMasterFSCKError(masterfsdev, rc)

    # TODO: Remove when upgrade is only from a version which creates ext3
    # Try to add a journal - due to unfortunate circumstances we exposed
    # to the public the code that created ext2 file system instead of ext3.
    # In order to make up for it we are trying to add journal here, just
    # to be sure (and we have fixed the file system creation).
    # If there is a journal already tune2fs will do nothing, indicating
    # this condition only with exit code. However, we do not really care.
    cmd = [constants.EXT_TUNE2FS, "-j", masterfsdev]
    misc.execCmd(cmd, sudo=True, deathSignal=signal.SIGKILL)

    masterMount = mount.Mount(masterfsdev, masterDir)
    try:
        masterMount.mount(vfstype=mount.VFS_EXT3)
    except mount.MountError as ex:
        # BUG FIX: "rc, out = ex" relied on Python 2 exceptions being
        # indexable; unpacking the args tuple explicitly behaves the
        # same on Python 2 and works on Python 3.
        rc, out = ex.args
        raise se.BlockStorageDomainMasterMountError(masterfsdev, rc, out)

    cmd = [constants.EXT_CHOWN, "%s:%s" %
           (constants.METADATA_USER, constants.METADATA_GROUP), masterDir]
    (rc, out, err) = misc.execCmd(cmd, sudo=True)
    if rc != 0:
        # Non-fatal: log and continue with the mount in place.
        self.log.error("failed to chown %s", masterDir)
def refreshDirTree(self):
    """
    Ensure the domain directory tree exists: the images directory, the
    special-volumes metadata directory, and one symlink per special LV
    pointing at its device path.
    """
    # Domain images folder.
    fileUtils.createdir(os.path.join(self.domaindir, sd.DOMAIN_IMAGES))

    # Domain special volumes folder.
    domMD = os.path.join(self.domaindir, sd.DOMAIN_META_DATA)
    fileUtils.createdir(domMD)

    lvm.activateLVs(self.sdUUID, SPECIAL_LVS)
    for lvName in SPECIAL_LVS:
        linkPath = os.path.join(domMD, lvName)
        # lexists: an existing (even dangling) link is left alone.
        if os.path.lexists(linkPath):
            continue
        os.symlink(lvm.lvPath(self.sdUUID, lvName), linkPath)
def __connectFileServer(self, conParams, fsType): """ Connect to a storage low level entity. """ conStatus = [] localPath = os.path.join(self.storage_repository, sd.DOMAIN_MNT_POINT) fileUtils.createdir(localPath) for con in conParams: try: mntPoint = fileUtils.transformPath(con['rp']) mntPath = os.path.join(localPath, mntPoint) if fsType == fileUtils.FSTYPE_NFS: # Stale handle usually resolves itself when doing directory lookups # BUT if someone deletes the export on the servers side. We will keep # getting stale handles and this is unresolvable unless you umount and # remount. if getProcPool().fileUtils.isStaleHandle(mntPath): # A VM might be holding a stale handle, we have to umount # but we can't umount as long as someone is holding a handle # even if it's stale. We use lazy so we can at least recover. # Processes having an open file handle will not recover until # they reopen the files. getProcPool().fileUtils.umount(con['rp'], mntPath, lazy=True) fileUtils.createdir(mntPath) rc = getProcPool().fileUtils.mount(con['rp'], mntPath, fsType) if rc == 0: try: validateDirAccess(mntPath) except se.StorageServerAccessPermissionError, ex: self.log.debug("Unmounting file system %s " "(not enough access permissions)" % con['rp']) getProcPool().fileUtils.umount(con['rp'], mntPath, fsType) raise else: self.log.error("Error during storage connection: rc=%s", rc, exc_info=True) rc = se.StorageServerConnectionError.code
def create(cls, sdUUID, domainName, domClass, remotePath, storageType,
           version):
    """
    Create new storage domain.

    'sdUUID'      - Storage Domain UUID
    'domainName'  - storage domain name ("iso" or "data domain name")
    'domClass'    - Data/Iso
    'remotePath'  - /data2
    'storageType' - NFS_DOMAIN, LOCALFS_DOMAIN, &etc.
    'version'     - DOMAIN_VERSIONS

    Returns the newly created LocalFsStorageDomain.

    Raises:
        se.UnicodeArgumentException: non-ASCII name on a domain
            version that does not support unicode.
    """
    cls.log.info("sdUUID=%s domainName=%s remotePath=%s "
                 "domClass=%s", sdUUID, domainName, remotePath, domClass)

    if not misc.isAscii(domainName) and not sd.supportsUnicode(version):
        raise se.UnicodeArgumentException()

    # Create local path
    mntPath = fileUtils.transformPath(remotePath)

    mntPoint = os.path.join(cls.storage_repository,
                            sd.DOMAIN_MNT_POINT, mntPath)

    cls._preCreateValidation(sdUUID, mntPoint, remotePath, version)

    domainDir = os.path.join(mntPoint, sdUUID)
    cls._prepareMetadata(domainDir, sdUUID, domainName, domClass,
                         remotePath, storageType, version)

    # create domain images folder
    imagesDir = os.path.join(domainDir, sd.DOMAIN_IMAGES)
    fileUtils.createdir(imagesDir)

    # create special imageUUID for ISO/Floppy volumes
    # Actually the local domain shouldn't be ISO, but
    # we can allow it for systems without NFS at all
    # BUG FIX: compare the domain class by value; "is" only worked by
    # accident of CPython interning small constants.
    if domClass == sd.ISO_DOMAIN:
        isoDir = os.path.join(imagesDir, sd.ISO_IMAGE_UUID)
        fileUtils.createdir(isoDir)

    fsd = LocalFsStorageDomain(os.path.join(mntPoint, sdUUID))
    fsd.initSPMlease()

    return fsd
def mountMaster(self):
    """
    Mount the master metadata file system. Should be called only by SPM.

    Sequence: activate the master LV, fsck it, best-effort add an ext3
    journal (upgrade from mistakenly-created ext2), mount it as ext3,
    then chown the mount point to the metadata user/group.

    Raises:
        se.BlockStorageDomainMasterFSCKError: fsck left errors
            uncorrected (rc >= 4).
        se.BlockStorageDomainMasterMountError: the mount failed.
    """
    lvm.activateLVs(self.sdUUID, MASTERLV)
    masterDir = os.path.join(self.domaindir, sd.MASTER_FS_DIR)
    fileUtils.createdir(masterDir)

    masterfsdev = lvm.lvPath(self.sdUUID, MASTERLV)
    # -p: preen mode, fix what is safely fixable without asking.
    cmd = [constants.EXT_FSCK, "-p", masterfsdev]
    (rc, out, err) = misc.execCmd(cmd)
    # fsck exit codes
    # 0    - No errors
    # 1    - File system errors corrected
    # 2    - File system errors corrected, system should
    #        be rebooted
    # 4    - File system errors left uncorrected
    # 8    - Operational error
    # 16   - Usage or syntax error
    # 32   - E2fsck canceled by user request
    # 128  - Shared library error
    if rc == 1 or rc == 2:
        # rc is a number
        self.log.info("fsck corrected fs errors (%s)", rc)
    if rc >= 4:
        raise se.BlockStorageDomainMasterFSCKError(masterfsdev, rc)

    # TODO: Remove when upgrade is only from a version which creates ext3
    # Try to add a journal - due to unfortunate circumstances we exposed
    # to the public the code that created ext2 file system instead of ext3.
    # In order to make up for it we are trying to add journal here, just
    # to be sure (and we have fixed the file system creation).
    # If there is a journal already tune2fs will do nothing, indicating this
    # condition only with exit code. However, we do not really care.
    cmd = [constants.EXT_TUNE2FS, "-j", masterfsdev]
    misc.execCmd(cmd)

    rc = fileUtils.mount(masterfsdev, masterDir,
                         mountType=fileUtils.FSTYPE_EXT3)
    # mount exit codes
    # mount has the following return codes (the bits can be ORed):
    # 0      success
    # 1      incorrect invocation or permissions
    # 2      system error (out of memory, cannot fork, no more loop devices)
    # 4      internal mount bug or missing nfs support in mount
    # 8      user interrupt
    # 16     problems writing or locking /etc/mtab
    # 32     mount failure
    # 64     some mount succeeded
    if rc != 0:
        # NOTE(review): 'out' here is still the fsck output captured
        # above -- fileUtils.mount() appears to return only a code --
        # so the raised error carries stale output.  TODO confirm.
        raise se.BlockStorageDomainMasterMountError(masterfsdev, rc, out)

    cmd = [
        constants.EXT_CHOWN,
        "%s:%s" % (constants.METADATA_USER, constants.METADATA_GROUP),
        masterDir
    ]
    (rc, out, err) = misc.execCmd(cmd)
    if rc != 0:
        # Non-fatal: log and continue with the mount in place.
        self.log.error("failed to chown %s", masterDir)