def delete(self, sr_uuid):
    """Delete the SR: attach, force a GC pass, verify no undeleted VDIs
    remain, remove leftover non-VDI files, then detach.

    Raises SRNotEmpty if live VDIs are still present, FileSRDelete on
    file-removal failure.
    """
    self.attach(sr_uuid)
    cleanup.gc_force(self.session, self.uuid)

    # check to make sure no VDIs are present; then remove old
    # files that are non VDI's
    try:
        if util.ioretry(lambda: util.pathexists(self.path)):
            # Load the VDI list
            self._loadvdis()
            for uuid in self.vdis:
                if not self.vdis[uuid].deleted:
                    raise xs_errors.XenError('SRNotEmpty',
                        opterr='VDIs still exist in SR')

            # remove everything else, there are no vdi's
            for name in util.ioretry(lambda: util.listdir(self.path)):
                fullpath = os.path.join(self.path, name)
                try:
                    util.ioretry(lambda: os.unlink(fullpath))
                except util.CommandException as inst:
                    # ENOENT: already gone; EISDIR: directories are kept
                    if inst.code != errno.ENOENT and \
                       inst.code != errno.EISDIR:
                        raise xs_errors.XenError('FileSRDelete',
                            opterr='failed to remove %s error %d'
                            % (fullpath, inst.code))
    # NOTE(review): the snippet ended without an except for this try;
    # handler completed following the sibling delete() in this file.
    except util.CommandException as inst:
        raise xs_errors.XenError('FileSRDelete',
            opterr='error %d' % inst.code)
    self.detach(sr_uuid)
def create(self, sr_uuid, size):
    """Create the SR directory on the NFS export.

    Temporarily mounts the export base directory, then creates
    <base>/<sr_uuid>. Raises NFSAttached if already mounted, SRExists
    if a non-empty target directory exists, NFSMount/NFSCreate on error.
    """
    if util.ioretry(lambda: self._checkmount()):
        raise xs_errors.XenError('NFSAttached')

    # Set the target path temporarily to the base dir
    # so that we can create the target SR directory
    self.remotepath = self.dconf['serverpath']
    try:
        self.attach(sr_uuid)
    except:
        # attach failed: best-effort removal of the mount point before
        # reporting the mount failure
        try:
            os.rmdir(self.path)
        except:
            pass
        raise xs_errors.XenError('NFSMount')

    newpath = os.path.join(self.path, sr_uuid)
    if util.ioretry(lambda: util.pathexists(newpath)):
        if len(util.ioretry(lambda: util.listdir(newpath))) != 0:
            self.detach(sr_uuid)
            raise xs_errors.XenError('SRExists')
    else:
        try:
            util.ioretry(lambda: util.makedirs(newpath))
        # Py2-only 'except E, v' replaced with 'as' (used elsewhere in
        # this file) for forward compatibility
        except util.CommandException as inst:
            if inst.code != errno.EEXIST:
                self.detach(sr_uuid)
                raise xs_errors.XenError('NFSCreate',
                    opterr='remote directory creation error is %d'
                    % inst.code)
def scan_srlist(path, dconf):
    """Scan and report SR, UUID.

    Returns a pretty-printed XML document listing each SR uuid found
    under 'path'; when dconf requests a probe-version report, appends
    the NFS versions supported by the server.
    """
    dom = xml.dom.minidom.Document()
    element = dom.createElement("SRlist")
    dom.appendChild(element)
    for val in filter(util.match_uuid, util.ioretry(
            lambda: util.listdir(path))):
        fullpath = os.path.join(path, val)
        if not util.ioretry(lambda: util.isdir(fullpath)):
            continue

        entry = dom.createElement('SR')
        element.appendChild(entry)

        subentry = dom.createElement("UUID")
        entry.appendChild(subentry)
        textnode = dom.createTextNode(val)
        subentry.appendChild(textnode)

    from NFSSR import PROBEVERSION
    # 'in' instead of Py2-only dict.has_key
    if PROBEVERSION in dconf:
        util.SMlog("Add supported nfs versions to sr-probe")
        supported_versions = get_supported_nfs_versions(dconf.get('server'))
        supp_ver = dom.createElement("SupportedVersions")
        element.appendChild(supp_ver)

        for ver in supported_versions:
            version = dom.createElement('Version')
            supp_ver.appendChild(version)
            textnode = dom.createTextNode(ver)
            version.appendChild(textnode)

    return dom.toprettyxml()
def delete(self, sr_uuid):
    """Delete the SR: force a GC pass, verify no undeleted VDIs remain,
    then remove leftover non-VDI files under the SR path.

    Raises SRUnavailable, SRNotEmpty or FileSRDelete.
    """
    if not self._checkpath(self.path):
        raise xs_errors.XenError("SRUnavailable",
                                 opterr="no such directory %s" % self.path)

    cleanup.gc_force(self.session, self.uuid)

    # check to make sure no VDIs are present; then remove old
    # files that are non VDI's
    try:
        if util.ioretry(lambda: util.pathexists(self.path)):
            # Load the VDI list
            self._loadvdis()
            for uuid in self.vdis:
                if not self.vdis[uuid].deleted:
                    raise xs_errors.XenError("SRNotEmpty",
                                             opterr="VDIs still exist in SR")

            # remove everything else, there are no vdi's
            for name in util.ioretry(lambda: util.listdir(self.path)):
                fullpath = os.path.join(self.path, name)
                try:
                    util.ioretry(lambda: os.unlink(fullpath))
                # Py2-only 'except E, v' replaced with 'as' for forward
                # compatibility (form already used elsewhere in the file)
                except util.CommandException as inst:
                    # ENOENT: already gone; EISDIR: directories are kept
                    if inst.code != errno.ENOENT and inst.code != errno.EISDIR:
                        raise xs_errors.XenError(
                            "FileSRDelete",
                            opterr="failed to remove %s error %d"
                            % (fullpath, inst.code)
                        )
    except util.CommandException as inst:
        raise xs_errors.XenError("FileSRDelete", opterr="error %d" % inst.code)
def create(self, sr_uuid, vdi_uuid, size): if util.ioretry(lambda: util.pathexists(self.path)): raise xs_errors.XenError('VDIExists') overhead = 0 if self.vdi_type == vhdutil.VDI_TYPE_VHD: overhead = vhdutil.calcOverheadFull(long(size)) # Test the amount of actual disk space if ENFORCE_VIRT_ALLOC: self.sr._loadvdis() reserved = self.sr.virtual_allocation sr_size = self.sr._getsize() if (sr_size - reserved) < (long(size) + overhead): raise xs_errors.XenError('SRNoSpace') if self.vdi_type == vhdutil.VDI_TYPE_VHD: try: size = vhdutil.validate_and_round_vhd_size(long(size)) mb = 1024L * 1024L size_mb = long(size) / mb util.ioretry(lambda: self._create(str(size_mb), self.path)) self.size = util.ioretry(lambda: self._query_v(self.path)) except util.CommandException, inst: raise xs_errors.XenError('VDICreate', opterr='error %d' % inst.code)
def load(self, vdi_uuid):
    """Populate this VDI's fields (utilisation, size, parent, hidden)
    from the on-disk image file for 'vdi_uuid'.

    Raises VDIType for an invalid type and VDILoad when the image
    cannot be stat'ed or queried.
    """
    self.vdi_type = SR.DEFAULT_TAP
    self.path = os.path.join(self.sr.path,
                             "%s.%s" % (vdi_uuid, self.vdi_type))
    if util.ioretry(lambda: util.pathexists(self.path)):
        try:
            st = util.ioretry(lambda: os.stat(self.path))
            self.utilisation = long(st.st_size)
        except util.CommandException as inst:
            if inst.code == errno.EIO:
                raise xs_errors.XenError('VDILoad',
                    opterr='Failed load VDI information %s' % self.path)
            else:
                # NOTE(review): non-EIO stat failures are reported as a
                # VDI type error — presumably intentional; verify.
                raise xs_errors.XenError('VDIType',
                    opterr='Invalid VDI type %s' % self.vdi_type)

        try:
            diskinfo = util.ioretry(lambda: self._query_info(self.path))
            # 'in' instead of Py2-only dict.has_key
            if 'parent' in diskinfo:
                self.parent = diskinfo['parent']
            else:
                self.parent = ''
            self.size = long(diskinfo['size']) * 1024 * 1024
            self.hidden = long(diskinfo['hidden'])
        except util.CommandException as inst:
            raise xs_errors.XenError('VDILoad',
                opterr='Failed load VDI information %s' % self.path)
def create(self, sr_uuid, vdi_uuid, size): if util.ioretry(lambda: util.pathexists(self.path)): raise xs_errors.XenError('VDIExists') overhead = 0 if self.vdi_type == vhdutil.VDI_TYPE_VHD: overhead = vhdutil.calcOverheadFull(long(size)) # Test the amount of actual disk space if ENFORCE_VIRT_ALLOC: self.sr._loadvdis() reserved = self.sr.virtual_allocation sr_size = self.sr._getsize() if (sr_size - reserved) < (long(size) + overhead): raise xs_errors.XenError('SRNoSpace') if self.vdi_type == vhdutil.VDI_TYPE_VHD: try: mb = 1024L * 1024L size_mb = util.roundup(VHD_SIZE_INC, long(size)) / mb if size_mb < 1 or (size_mb + (overhead / mb)) >= MAX_DISK_MB: raise xs_errors.XenError('VDISize', opterr='VDI size ' + \ 'must be between 1 MB and %d MB' % \ ((MAX_DISK_MB - MAX_DISK_METADATA) - 1)) util.ioretry(lambda: self._create(str(size_mb), self.path)) self.size = util.ioretry(lambda: self._query_v(self.path)) except util.CommandException, inst: raise xs_errors.XenError('VDICreate', opterr='error %d' % inst.code)
def _find_path_with_retries(self, vdi_uuid, maxretry=5, period=2.0):
    """Locate the backing file for 'vdi_uuid' (VHD, raw image, or CBT
    log), retrying up to 'maxretry' times with 'period' seconds
    between attempts.

    On success sets self.path and self.vdi_type (and self.hidden for
    raw/CBT files). Returns True if a file was found, False otherwise.
    """
    vhd_path = os.path.join(self.sr.path,
                            "%s.%s" % (vdi_uuid, self.PARAM_VHD))
    raw_path = os.path.join(self.sr.path,
                            "%s.%s" % (vdi_uuid, self.PARAM_RAW))
    cbt_path = os.path.join(self.sr.path, "%s.%s" % (vdi_uuid, CBTLOG_TAG))
    found = False
    tries = 0
    while tries < maxretry and not found:
        tries += 1
        if util.ioretry(lambda: util.pathexists(vhd_path)):
            self.vdi_type = vhdutil.VDI_TYPE_VHD
            self.path = vhd_path
            found = True
        elif util.ioretry(lambda: util.pathexists(raw_path)):
            self.vdi_type = vhdutil.VDI_TYPE_RAW
            self.path = raw_path
            self.hidden = False
            found = True
        elif util.ioretry(lambda: util.pathexists(cbt_path)):
            self.vdi_type = CBTLOG_TAG
            self.path = cbt_path
            self.hidden = False
            found = True

        if not found:
            util.SMlog("VHD %s not found, retry %s of %s"
                       % (vhd_path, tries, maxretry))
            # Fix: the original also slept after the final attempt,
            # wasting one extra 'period' before giving up.
            if tries < maxretry:
                time.sleep(period)

    return found
def lock(self, sr_uuid, vdi_uuid, force, l_uuid):
    """Attempt to take the VDI lock, stealing an existing lock when
    permitted, then delegate to the base implementation."""
    util.ioretry(lambda: self._lockt(force, l_uuid))
    # A negative status means the first attempt failed; if the holder's
    # lock may be stolen, force-acquire it.
    if self.status < 0 and util.ioretry(lambda: self._checklock(l_uuid)):
        util.ioretry(lambda: self._lockt("1", l_uuid))
    return super(NFSVDI, self).lock(sr_uuid, vdi_uuid, force, l_uuid)
def mount(self, mountpoint=None):
    """Mount the remote CIFS export at 'mountpoint'.

    Raises CifsException for a bad mountpoint or directory-creation
    failure, ConfigParamsMissing when credentials are absent from
    device-config.
    """
    # 'is None' identity test instead of '== None'
    if mountpoint is None:
        mountpoint = self.mountpoint
    elif not util.is_string(mountpoint) or mountpoint == "":
        raise CifsException("mountpoint not a string object")

    # Credentials must be present in device-config before mounting
    missing_params = set()
    if 'username' not in self.dconf:
        missing_params.add('username')
    if not ('password' in self.dconf or 'password_secret' in self.dconf):
        missing_params.add('password')
    if missing_params:
        errstr = ('device-config is missing the following parameters: '
                  + ', '.join(missing_params))
        raise xs_errors.XenError('ConfigParamsMissing', opterr=errstr)

    try:
        if not util.ioretry(lambda: util.isdir(mountpoint)):
            util.ioretry(lambda: util.makedirs(mountpoint))
    except util.CommandException as inst:
        raise CifsException("Failed to make directory: code is %d"
                            % inst.code)
def _checkpath(self, path):
    """Return True iff 'path' exists and is a directory; raise EIO on
    I/O failure while checking."""
    try:
        exists = util.ioretry(lambda: util.pathexists(path))
        if exists and util.ioretry(lambda: util.isdir(path)):
            return True
        return False
    except util.CommandException:
        raise xs_errors.XenError("EIO",
                                 opterr="IO error checking path %s" % path)
def soft_mount(mountpoint, remoteserver, remotepath, transport):
    """Mount the remote NFS export at 'mountpoint'"""
    # NOTE(review): only the mountpoint-creation step is visible in this
    # snippet; remoteserver/remotepath/transport are unused here.
    try:
        if not util.ioretry(lambda: util.isdir(mountpoint)):
            util.ioretry(lambda: util.makedirs(mountpoint))
    # 'as' form for consistency with the rest of the file
    except util.CommandException as inst:
        raise NfsException("Failed to make directory: code is %d" % inst.code)
def check_server_tcp(server):
    """Check that NFS over TCP/IP V3 is supported on the server.

    Raises NfsException if rpcinfo fails or times out; returns None on
    success. (The previous docstring claimed a True/False return, but
    the function never returns a value.)
    """
    try:
        util.ioretry(lambda: util.pread([RPCINFO_BIN, "-t", "%s" % server,
                                         "nfs", "3"]),
                     errlist=[errno.EPERM], maxretry=2, nofail=True)
    except util.CommandException as inst:
        raise NfsException("rpcinfo failed or timed out: return code %d"
                           % inst.code)
def soft_mount(mountpoint, remoteserver, remotepath, transport, timeout=0,
               nfsversion=DEFAULT_NFSVERSION):
    """Mount the remote NFS export at 'mountpoint'.
    The 'timeout' param here is in seconds"""
    # NOTE(review): only the mountpoint-creation step is visible in this
    # snippet; the remaining parameters are unused here.
    try:
        if not util.ioretry(lambda: util.isdir(mountpoint)):
            util.ioretry(lambda: util.makedirs(mountpoint))
    # 'as' form for consistency with the rest of the file
    except util.CommandException as inst:
        raise NfsException("Failed to make directory: code is %d" % inst.code)
def _load(self, vdi_uuid):
    # Populate this VDI's fields either from the SR's preloaded VHD
    # cache (self.sr.vhds) or, failing that, by stat()ing and querying
    # the image file directly.
    self.vdi_type = SR.DEFAULT_TAP
    self.path = os.path.join(self.sr.path, "%s.%s" % \
            (vdi_uuid,self.vdi_type))
    if self.sr.__dict__.get("vhds") and self.sr.vhds.get(vdi_uuid):
        # VHD info already preloaded: use it instead of querying directly
        vhdInfo = self.sr.vhds[vdi_uuid]
        self.utilisation = vhdInfo.sizePhys
        self.size = vhdInfo.sizeVirt
        self.hidden = vhdInfo.hidden
        if self.hidden:
            # hidden VDIs are not user-managed
            self.managed = False
        self.parent = vhdInfo.parentUuid
        if self.parent:
            self.sm_config_override = {'vhd-parent':self.parent}
        else:
            self.sm_config_override = {'vhd-parent':None}
        return

    try:
        # Change to the SR directory in case parent
        # locator field path has changed
        os.chdir(self.sr.path)
    except:
        raise xs_errors.XenError('SRUnavailable')

    if util.ioretry(lambda: util.pathexists(self.path)):
        try:
            st = util.ioretry(lambda: os.stat(self.path))
            self.utilisation = long(st.st_size)
        except util.CommandException, inst:
            if inst.code == errno.EIO:
                raise xs_errors.XenError('VDILoad', \
                        opterr='Failed load VDI information %s' % self.path)
            else:
                # NOTE(review): non-EIO stat failures are reported as a
                # VDI type error — presumably intentional; verify.
                raise xs_errors.XenError('VDIType', \
                        opterr='Invalid VDI type %s' % self.vdi_type)

        try:
            diskinfo = util.ioretry(lambda: self._query_info(self.path))
            if diskinfo.has_key('parent'):
                self.parent = diskinfo['parent']
                self.sm_config_override = {'vhd-parent':self.parent}
            else:
                self.sm_config_override = {'vhd-parent':None}
                self.parent = ''
            self.size = long(diskinfo['size']) * 1024 * 1024
            self.hidden = long(diskinfo['hidden'])
            if self.hidden:
                self.managed = False
            self.exists = True
        except util.CommandException, inst:
            raise xs_errors.XenError('VDILoad', \
                    opterr='Failed load VDI information %s' % self.path)
def check_server_service(server):
    """Ensure NFS service is up and available on the remote server.
    Raises exception if fails to detect service after
    NFS_SERVICE_RETRY * NFS_SERVICE_WAIT
    """
    rpcinfo_cmd = [RPCINFO_BIN, "-t", "%s" % server, "nfs"]
    util.ioretry(lambda: util.pread(rpcinfo_cmd),
                 errlist=[errno.EPERM, errno.EPIPE, errno.EIO],
                 maxretry=NFS_SERVICE_RETRY,
                 period=NFS_SERVICE_WAIT,
                 nofail=True)
def delete(self, sr_uuid, vdi_uuid):
    """Delete the VDI by marking its backing file hidden (the GC
    reclaims it later). No-op when the file is already gone.

    Raises VDIInUse if the VDI is attached, VDIDelete on failure.
    """
    if not util.ioretry(lambda: util.pathexists(self.path)):
        return
    if self.attached:
        raise xs_errors.XenError('VDIInUse')

    try:
        util.ioretry(lambda: self._mark_hidden(self.path))
    # 'as' form for consistency with the rest of the file
    except util.CommandException as inst:
        raise xs_errors.XenError('VDIDelete', opterr='error %d' % inst.code)
def delete(self, sr_uuid, vdi_uuid):
    """Delete a VHD-type VDI by marking its backing file hidden; other
    VDI types are left untouched. No-op when the file is already gone.

    Raises VDIInUse if the VDI is attached, VDIDelete on failure.
    """
    if not util.ioretry(lambda: util.pathexists(self.path)):
        return
    if self.attached:
        raise xs_errors.XenError("VDIInUse")

    if self.vdi_type == vhdutil.VDI_TYPE_VHD:
        try:
            util.ioretry(lambda: self._mark_hidden(self.path))
        # 'as' form for consistency with the rest of the file
        except util.CommandException as inst:
            raise xs_errors.XenError("VDIDelete",
                                     opterr="error %d" % inst.code)
def attach(self, sr_uuid):
    """Attach the SR by bind-mounting self.remotepath onto self.path,
    creating the mount point first when needed.

    Raises FileSRCreate if the mount point cannot be created or the
    bind mount fails.
    """
    if not self._checkmount():
        try:
            util.ioretry(lambda: util.makedirs(self.path))
        # 'as' form for consistency with the rest of the file
        except util.CommandException as inst:
            # EEXIST: mount point already present — fine
            if inst.code != errno.EEXIST:
                raise xs_errors.XenError("FileSRCreate",
                    opterr='fail to create mount point. Errno is %s'
                    % inst.code)
        try:
            util.pread(["mount", "--bind", self.remotepath, self.path])
        except util.CommandException as inst:
            raise xs_errors.XenError('FileSRCreate',
                opterr='fail to mount FileSR. Errno is %s' % inst.code)
def mount(self, mountpoint=None):
    """Mount the remote SMB export at 'mountpoint'"""
    # 'is None' identity test instead of '== None'
    if mountpoint is None:
        mountpoint = self.mountpoint
    elif not util.is_string(mountpoint) or mountpoint == "":
        raise SMBException("mountpoint not a string object")

    try:
        if not util.ioretry(lambda: util.isdir(mountpoint)):
            util.ioretry(lambda: util.makedirs(mountpoint))
    # 'as' form for consistency with the rest of the file
    except util.CommandException as inst:
        raise SMBException("Failed to make directory: code is %d" % inst.code)
def soft_mount(mountpoint, remoteserver, remotepath, transport,
               useroptions='', timeout=None, nfsversion=DEFAULT_NFSVERSION,
               retrans=None):
    """Mount the remote NFS export at 'mountpoint'.

    The 'timeout' param here is in deciseconds (tenths of a second). See
    nfs(5) for details.
    """
    # NOTE(review): only the mountpoint-creation step is visible in this
    # snippet; the remaining parameters are unused here.
    try:
        if not util.ioretry(lambda: util.isdir(mountpoint)):
            util.ioretry(lambda: util.makedirs(mountpoint))
    # 'as' form for consistency with the rest of the file
    except util.CommandException as inst:
        raise NfsException("Failed to make directory: code is %d" % inst.code)
def delete(self, sr_uuid):
    """Delete the CIFS SR: remove non-VDI content via the base class,
    then remove the per-SR link directory and unmount.

    Raises CIFSDelete on unexpected errors (ENOENT is tolerated).
    """
    # try to remove/delete non VDI contents first
    super(CIFSSR, self).delete(sr_uuid)
    try:
        if self.checkmount():
            self.detach(sr_uuid)
        self.mount()
        if util.ioretry(lambda: util.pathexists(self.linkpath)):
            util.ioretry(lambda: os.rmdir(self.linkpath))
        self.unmount(self.mountpoint, True)
    # 'as' form for consistency with the rest of the file
    except util.CommandException as inst:
        self.detach(sr_uuid)
        # ENOENT: the directory was already gone — treat as success
        if inst.code != errno.ENOENT:
            raise xs_errors.XenError('CIFSDelete')
def mount(self, mountpoint, blockdevice):
    """Mount 'blockdevice' on 'mountpoint' as an OCFS2 filesystem.

    Raises SRUnavailable if the mount point cannot be created,
    OCFSMount if the mount command fails.
    """
    try:
        if not util.ioretry(lambda: util.isdir(mountpoint)):
            util.ioretry(lambda: util.makedirs(mountpoint))
    except util.CommandException:
        raise xs_errors.XenError('SRUnavailable',
                                 opterr='no such directory %s' % mountpoint)

    cmd = ['mount', '-t', 'ocfs2', blockdevice, mountpoint, '-o',
           'noatime,data=writeback,nointr,commit=60,coherency=buffered']
    try:
        # output of the mount command is not needed
        util.pread(cmd)
    except util.CommandException as inst:
        # Fix: the original formatted os.strerror(inst.code) — a string —
        # with %d, raising TypeError while reporting the failure; format
        # the numeric code instead, matching the message text.
        raise xs_errors.XenError('OCFSMount',
            opterr='Failed to mount FS. Errno is %d' % inst.code)
def unlock(self, sr_uuid, vdi_uuid, l_uuid):
    """Release the write lock 'l_uuid' on this VDI.

    A missing lock (ENOENT) is ignored; any other failure raises
    VDIInUse.
    """
    try:
        cmd = [SR.LOCK_UTIL, "unlock", self.path, "w", l_uuid]
        self.status = util.ioretry(lambda: util.pread2(cmd))
    # 'as' form for consistency with the rest of the file
    except util.CommandException as inst:
        if inst.code != errno.ENOENT:
            raise xs_errors.XenError('VDIInUse',
                                     opterr='Unable to release lock')
def _checkmount(self):
    """Return True when the SR path is an active mount (or a valid bind
    mount); for cifs the dedicated mountpoint is checked instead."""
    if self.handles("cifs"):
        mount_path = self.mountpoint
    else:
        mount_path = self.path
    return util.ioretry(lambda: util.pathexists(mount_path) and
                        (util.ismount(mount_path) or
                         util.pathexists(self.remotepath) and self._isbind()))
def detach(self, sr_uuid, vdi_uuid):
    """Deactivate the LV backing this VDI, if it exists.

    Raises LVMUnMount when lvchange fails.
    """
    try:
        if os.path.exists(self.path):
            cmd = ["lvchange", "-an", self.path]
            # command output was previously captured but never used
            util.ioretry(lambda: util.pread2(cmd))
    # 'as' form for consistency with the rest of the file
    except util.CommandException as inst:
        raise xs_errors.XenError('LVMUnMount',
            opterr='lvchange failed error is %d' % inst.code)
def _checkpath(self, path):
    """Return whether 'path' exists; raise EIO on I/O failure while
    checking."""
    try:
        return bool(util.ioretry(lambda: util.pathexists(path)))
    except util.CommandException:
        raise xs_errors.XenError('EIO',
                                 opterr='IO error checking path %s' % path)
def detach(self, sr_uuid):
    """Unmount the SR and remove its mount point; no-op when not
    mounted.

    Raises NFSUnMount on failure.
    """
    if not util.ioretry(lambda: self._checkmount()):
        return
    try:
        util.pread(["umount", self.path])
        os.rmdir(self.path)
    # 'as' form for consistency with the rest of the file
    except util.CommandException as inst:
        raise xs_errors.XenError('NFSUnMount',
                                 opterr='error is %d' % inst.code)
def create(self, sr_uuid, size):
    """
    Create the SR.  The path must not already exist, or if it does,
    it must be empty.  (This accounts for the case where the user has
    mounted a device onto a directory manually and want to use this as the
    root of a file-based SR.)
    """
    # Fix: the original wrapped everything in a bare 'except:' that also
    # caught the deliberately-raised SRExists/FileSRCreate XenErrors and
    # replaced them with a generic FileSRCreate. Catch only command
    # failures so the specific errors propagate to the caller.
    try:
        path_exists = util.ioretry(lambda: util.pathexists(self.remotepath))
        if path_exists:
            if len(util.ioretry(lambda: util.listdir(self.remotepath))) != 0:
                raise xs_errors.XenError("SRExists")
        else:
            try:
                util.ioretry(lambda: os.mkdir(self.remotepath))
            except util.CommandException as inst:
                if inst.code == errno.EEXIST:
                    raise xs_errors.XenError("SRExists")
                else:
                    raise xs_errors.XenError(
                        "FileSRCreate",
                        opterr="directory creation failure %d" % inst.code
                    )
    except util.CommandException:
        # unexpected I/O failure while probing/creating the directory
        raise xs_errors.XenError("FileSRCreate")
def scan_srlist(path):
    """Return a pretty-printed XML document listing the SR uuids found
    as directories directly under 'path'."""
    dom = xml.dom.minidom.Document()
    root = dom.createElement("SRlist")
    dom.appendChild(root)

    candidates = util.ioretry(lambda: util.listdir(path))
    for val in filter(util.match_uuid, candidates):
        if not util.ioretry(lambda: util.isdir(os.path.join(path, val))):
            continue

        sr_node = dom.createElement('SR')
        root.appendChild(sr_node)

        uuid_node = dom.createElement("UUID")
        sr_node.appendChild(uuid_node)
        uuid_node.appendChild(dom.createTextNode(val))

    return dom.toprettyxml()
# NOTE(review): fragment of an SR scan routine — the enclosing 'def' and
# the definition of 'pattern' lie outside this view; indentation
# reconstructed at top level for readability.
try:
    self.vhds = vhdutil.getAllVHDs(pattern, FileVDI.extractUuid)
except util.CommandException, inst:
    raise xs_errors.XenError('SRScan', opterr="error VHD-scanning " \
            "path %s (%s)" % (self.path, inst))
for uuid in self.vhds.iterkeys():
    if self.vhds[uuid].error:
        raise xs_errors.XenError('SRScan', opterr='uuid=%s' % uuid)
    self.vdis[uuid] = self.vdi(uuid, True)
    # Get the key hash of any encrypted VDIs:
    vhd_path = os.path.join(self.path, self.vhds[uuid].path)
    key_hash = vhdutil.getKeyHash(vhd_path)
    self.vdis[uuid].sm_config_override['key_hash'] = key_hash

# raw VDIs and CBT log files
files = util.ioretry(lambda: util.listdir(self.path))
for fn in files:
    if fn.endswith(vhdutil.FILE_EXTN_RAW):
        # strip the raw extension to recover the uuid
        uuid = fn[:-(len(vhdutil.FILE_EXTN_RAW))]
        self.vdis[uuid] = self.vdi(uuid, True)
    elif fn.endswith(CBTLOG_TAG):
        cbt_uuid = fn.split(".")[0]
        # If an associated disk exists, update CBT status
        # else create new VDI of type cbt_metadata
        if self.vdis.has_key(cbt_uuid):
            self.vdis[cbt_uuid].cbt_enabled = True
        else:
            new_vdi = self.vdi(cbt_uuid)
            new_vdi.ty = "cbt_metadata"
            new_vdi.cbt_enabled = True
            self.vdis[cbt_uuid] = new_vdi
def _snapshot(self, snap_type, cbtlog=None, cbt_consistency=None):
    # Snapshot this VDI: rename the live image to a new hidden base
    # copy, re-snapshot the leaf (plus an extra clone for
    # SNAPSHOT_DOUBLE), then introduce the new VDI database records.
    util.SMlog("FileVDI._snapshot for %s (type %s)" % (self.uuid, snap_type))

    args = []
    args.append("vdi_clone")
    args.append(self.sr.uuid)
    args.append(self.uuid)

    dest = None
    dst = None
    if snap_type == VDI.SNAPSHOT_DOUBLE:
        dest = util.gen_uuid()
        dst = os.path.join(self.sr.path, "%s.%s" % (dest, self.vdi_type))
        args.append(dest)

    if self.hidden:
        raise xs_errors.XenError('VDIClone', opterr='hidden VDI')

    depth = vhdutil.getDepth(self.path)
    if depth == -1:
        raise xs_errors.XenError('VDIUnavailable', \
                opterr='failed to get VHD depth')
    elif depth >= vhdutil.MAX_CHAIN_SIZE:
        raise xs_errors.XenError('SnapshotChainTooLong')

    # Test the amount of actual disk space
    if ENFORCE_VIRT_ALLOC:
        self.sr._loadvdis()
        reserved = self.sr.virtual_allocation
        sr_size = self.sr._getsize()
        # single/internal snapshots only create one new VDI
        num_vdis = 2
        if (snap_type == VDI.SNAPSHOT_SINGLE or
                snap_type == VDI.SNAPSHOT_INTERNAL):
            num_vdis = 1
        if (sr_size - reserved) < ((self.size + VDI.VDIMetadataSize( \
                vhdutil.VDI_TYPE_VHD, self.size)) * num_vdis):
            raise xs_errors.XenError('SRNoSpace')

    newuuid = util.gen_uuid()
    src = self.path
    newsrc = os.path.join(self.sr.path, "%s.%s" % (newuuid, self.vdi_type))
    newsrcname = "%s.%s" % (newuuid, self.vdi_type)

    if not self._checkpath(src):
        raise xs_errors.XenError('VDIUnavailable', \
                opterr='VDI %s unavailable %s' % (self.uuid, src))

    # wkcfix: multiphase
    util.start_log_entry(self.sr.path, self.path, args)

    # We assume the filehandle has been released
    try:
        util.ioretry(lambda: os.rename(src, newsrc))

        # Create the snapshot under a temporary name, then rename
        # it afterwards. This avoids a small window where it exists
        # but is invalid. We do not need to do this for
        # snap_type == VDI.SNAPSHOT_DOUBLE because dst never existed
        # before so nobody will try to query it.
        tmpsrc = "%s.%s" % (src, "new")
        util.ioretry(lambda: self._snap(tmpsrc, newsrcname))
        util.ioretry(lambda: os.rename(tmpsrc, src))
        if snap_type == VDI.SNAPSHOT_DOUBLE:
            util.ioretry(lambda: self._snap(dst, newsrcname))
        # mark the original file (in this case, its newsrc)
        # as hidden so that it does not show up in subsequent scans
        util.ioretry(lambda: self._mark_hidden(newsrc))

        # Verify parent locator field of both children and delete
        # newsrc if unused
        introduce_parent = True
        try:
            srcparent = util.ioretry(lambda: self._query_p_uuid(src))
            dstparent = None
            if snap_type == VDI.SNAPSHOT_DOUBLE:
                dstparent = util.ioretry(lambda: self._query_p_uuid(dst))
            if srcparent != newuuid and \
                    (snap_type == VDI.SNAPSHOT_SINGLE or \
                    snap_type == VDI.SNAPSHOT_INTERNAL or \
                    dstparent != newuuid):
                util.ioretry(lambda: os.unlink(newsrc))
                introduce_parent = False
        except:
            # best-effort: keep the base copy if parent queries fail
            pass

        # Introduce the new VDI records
        leaf_vdi = None
        if snap_type == VDI.SNAPSHOT_DOUBLE:
            leaf_vdi = VDI.VDI(self.sr, dest)  # user-visible leaf VDI
            leaf_vdi.read_only = False
            leaf_vdi.location = dest
            leaf_vdi.size = self.size
            leaf_vdi.utilisation = self.utilisation
            leaf_vdi.sm_config = {}
            leaf_vdi.sm_config['vhd-parent'] = dstparent
            # If the parent is encrypted set the key_hash
            # for the new snapshot disk
            vdi_ref = self.sr.srcmd.params['vdi_ref']
            sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
            if "key_hash" in sm_config:
                leaf_vdi.sm_config['key_hash'] = sm_config['key_hash']
            # If we have CBT enabled on the VDI,
            # set CBT status for the new snapshot disk
            if cbtlog:
                leaf_vdi.cbt_enabled = True

        base_vdi = None
        if introduce_parent:
            base_vdi = VDI.VDI(self.sr, newuuid)  # readonly parent
            base_vdi.label = "base copy"
            base_vdi.read_only = True
            base_vdi.location = newuuid
            base_vdi.size = self.size
            base_vdi.utilisation = self.utilisation
            base_vdi.sm_config = {}
            grandparent = util.ioretry(lambda: self._query_p_uuid(newsrc))
            if grandparent.find("no parent") == -1:
                base_vdi.sm_config['vhd-parent'] = grandparent

        try:
            if snap_type == VDI.SNAPSHOT_DOUBLE:
                leaf_vdi_ref = leaf_vdi._db_introduce()
                util.SMlog("vdi_clone: introduced VDI: %s (%s)" % \
                        (leaf_vdi_ref,dest))

            if introduce_parent:
                base_vdi_ref = base_vdi._db_introduce()
                self.session.xenapi.VDI.set_managed(base_vdi_ref, False)
                util.SMlog("vdi_clone: introduced VDI: %s (%s)" % (base_vdi_ref, newuuid))
            vdi_ref = self.sr.srcmd.params['vdi_ref']
            sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
            sm_config['vhd-parent'] = srcparent
            self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
        except Exception, e:
            util.SMlog(
                "vdi_clone: caught error during VDI.db_introduce: %s" %
                (str(e)))
            # Note it's too late to actually clean stuff up here: the base
            # disk has been marked as deleted already.
            util.end_log_entry(self.sr.path, self.path, ["error"])
            raise
    except util.CommandException, inst:
        # XXX: it might be too late if the base disk has been marked as
        # deleted!
        self._clonecleanup(src, dst, newsrc)
        util.end_log_entry(self.sr.path, self.path, ["error"])
        raise xs_errors.XenError('VDIClone',
                opterr='VDI clone failed error %d' % inst.code)
def _snapshot(self, snap_type, cbtlog=None, cbt_consistency=None):
    # Variant of FileVDI._snapshot with per-step error handling: a
    # rename failure other than ENOENT aborts immediately; snapshot-step
    # failures other than EIO are re-raised.
    util.SMlog("FileVDI._snapshot for %s (type %s)" % (self.uuid, snap_type))

    args = []
    args.append("vdi_clone")
    args.append(self.sr.uuid)
    args.append(self.uuid)

    dest = None
    dst = None
    if snap_type == VDI.SNAPSHOT_DOUBLE:
        dest = util.gen_uuid()
        dst = os.path.join(self.sr.path, "%s.%s" % (dest, self.vdi_type))
        args.append(dest)

    if self.hidden:
        raise xs_errors.XenError('VDIClone', opterr='hidden VDI')

    depth = vhdutil.getDepth(self.path)
    if depth == -1:
        raise xs_errors.XenError('VDIUnavailable', \
                opterr='failed to get VHD depth')
    elif depth >= vhdutil.MAX_CHAIN_SIZE:
        raise xs_errors.XenError('SnapshotChainTooLong')

    # Test the amount of actual disk space
    if ENFORCE_VIRT_ALLOC:
        self.sr._loadvdis()
        reserved = self.sr.virtual_allocation
        sr_size = self.sr._getsize()
        # single/internal snapshots only create one new VDI
        num_vdis = 2
        if (snap_type == VDI.SNAPSHOT_SINGLE or
                snap_type == VDI.SNAPSHOT_INTERNAL):
            num_vdis = 1
        if (sr_size - reserved) < ((self.size + VDI.VDIMetadataSize( \
                vhdutil.VDI_TYPE_VHD, self.size)) * num_vdis):
            raise xs_errors.XenError('SRNoSpace')

    newuuid = util.gen_uuid()
    src = self.path
    newsrc = os.path.join(self.sr.path, "%s.%s" % (newuuid, self.vdi_type))
    newsrcname = "%s.%s" % (newuuid, self.vdi_type)

    if not self._checkpath(src):
        raise xs_errors.XenError('VDIUnavailable', \
                opterr='VDI %s unavailable %s' % (self.uuid, src))

    # wkcfix: multiphase
    util.start_log_entry(self.sr.path, self.path, args)

    # We assume the filehandle has been released
    try:
        try:
            util.ioretry(lambda: os.rename(src, newsrc))
        except util.CommandException, inst:
            if inst.code != errno.ENOENT:
                # failed to rename, simply raise error
                util.end_log_entry(self.sr.path, self.path, ["error"])
                raise
        try:
            # Create the snapshot under a temporary name, then rename
            # it afterwards. This avoids a small window where it exists
            # but is invalid. We do not need to do this for
            # snap_type == VDI.SNAPSHOT_DOUBLE because dst never existed
            # before so nobody will try to query it.
            tmpsrc = "%s.%s" % (src, "new")
            util.ioretry(lambda: self._snap(tmpsrc, newsrcname))
            util.ioretry(lambda: os.rename(tmpsrc, src))
            if snap_type == VDI.SNAPSHOT_DOUBLE:
                util.ioretry(lambda: self._snap(dst, newsrcname))
            # mark the original file (in this case, its newsrc)
            # as hidden so that it does not show up in subsequent scans
            util.ioretry(lambda: self._mark_hidden(newsrc))
        except util.CommandException, inst:
            if inst.code != errno.EIO:
                raise
        # NOTE(review): snippet truncated here — the outer 'try' has no
        # matching except/finally within this view.
def _loadvdis(self):
    # Build self.vdis from on-disk VHDs, raw images and CBT log files,
    # then compute the SR's virtual allocation and drop hidden leaves.
    if self.vdis:
        # already populated — scan once per SR object
        return

    pattern = os.path.join(self.path, "*%s" % vhdutil.FILE_EXTN_VHD)
    try:
        self.vhds = vhdutil.getAllVHDs(pattern, FileVDI.extractUuid)
    except util.CommandException as inst:
        raise xs_errors.XenError('SRScan', opterr="error VHD-scanning " \
                "path %s (%s)" % (self.path, inst))
    try:
        # Cross-check the VHD scan against a plain glob and log any
        # discrepancy (diagnostic only; failures are ignored)
        list_vhds = [ FileVDI.extractUuid(v) for v in \
                util.ioretry(lambda: glob.glob(pattern)) ]
        if len(self.vhds) != len(list_vhds):
            util.SMlog("VHD scan returns %d VHDs: %s" % \
                    (len(self.vhds), sorted(list(self.vhds))))
            util.SMlog("VHD list returns %d VHDs: %s" % \
                    (len(list_vhds), sorted(list_vhds)))
    except:
        pass
    for uuid in self.vhds.iterkeys():
        if self.vhds[uuid].error:
            raise xs_errors.XenError('SRScan', opterr='uuid=%s' % uuid)
        self.vdis[uuid] = self.vdi(uuid, True)
        # Get the key hash of any encrypted VDIs:
        vhd_path = os.path.join(self.path, self.vhds[uuid].path)
        key_hash = vhdutil.getKeyHash(vhd_path)
        self.vdis[uuid].sm_config_override['key_hash'] = key_hash

    # raw VDIs and CBT log files
    files = util.ioretry(lambda: util.listdir(self.path))
    for fn in files:
        if fn.endswith(vhdutil.FILE_EXTN_RAW):
            # strip the raw extension to recover the uuid
            uuid = fn[:-(len(vhdutil.FILE_EXTN_RAW))]
            self.vdis[uuid] = self.vdi(uuid, True)
        elif fn.endswith(CBTLOG_TAG):
            cbt_uuid = fn.split(".")[0]
            # If an associated disk exists, update CBT status
            # else create new VDI of type cbt_metadata
            if cbt_uuid in self.vdis:
                self.vdis[cbt_uuid].cbt_enabled = True
            else:
                new_vdi = self.vdi(cbt_uuid)
                new_vdi.ty = "cbt_metadata"
                new_vdi.cbt_enabled = True
                self.vdis[cbt_uuid] = new_vdi

    # Mark parent VDIs as Read-only and generate virtual allocation
    self.virtual_allocation = 0
    for uuid, vdi in self.vdis.iteritems():
        if vdi.parent:
            if vdi.parent in self.vdis:
                self.vdis[vdi.parent].read_only = True
            if vdi.parent in geneology:
                geneology[vdi.parent].append(uuid)
            else:
                geneology[vdi.parent] = [uuid]
        if not vdi.hidden:
            self.virtual_allocation += (vdi.size)

    # now remove all hidden leaf nodes from self.vdis so that they are not
    # introduced into the Agent DB when SR is synchronized. With the
    # asynchronous GC, a deleted VDI might stay around until the next
    # SR.scan, so if we don't ignore hidden leaves we would pick up
    # freshly-deleted VDIs as newly-added VDIs
    for uuid in self.vdis.keys():
        if uuid not in geneology and self.vdis[uuid].hidden:
            util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid)
            del self.vdis[uuid]
class SMBSR(FileSR.FileSR):
    """SMB file-based storage repository"""

    def handles(type):
        # driver dispatch: this class serves SR type 'smb'
        return type == 'smb'
    handles = staticmethod(handles)

    def load(self, sr_uuid):
        # Initialise SR state: locking, driver config, credentials and
        # the mount/link/SR paths.
        self.ops_exclusive = FileSR.OPS_EXCLUSIVE
        self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
        self.sr_vditype = SR.DEFAULT_TAP
        self.driver_config = DRIVER_CONFIG
        if not self.dconf.has_key('server'):
            raise xs_errors.XenError('ConfigServerMissing')
        self.remoteserver = self.dconf['server']
        if self.sr_ref and self.session is not None :
            self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
        else:
            self.sm_config = self.srcmd.params.get('sr_sm_config') or {}
        self.credentials = None
        self.mountpoint = os.path.join(SR.MOUNT_BASE, 'SMB',
                self.__extract_server(), sr_uuid)
        self.linkpath = os.path.join(self.mountpoint, sr_uuid or "")
        # Remotepath is the absolute path inside a share that is to be mounted
        # For a SMB SR, only the root can be mounted.
        self.remotepath = ''
        self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
        self._check_o_direct()

    def checkmount(self):
        # True when the share is mounted and the per-SR link dir exists
        return util.ioretry(lambda: ((util.pathexists(self.mountpoint) and \
                util.ismount(self.mountpoint)) and \
                util.pathexists(self.linkpath)))

    def mount(self, mountpoint=None):
        """Mount the remote SMB export at 'mountpoint'"""
        if mountpoint == None:
            mountpoint = self.mountpoint
        elif not util.is_string(mountpoint) or mountpoint == "":
            raise SMBException("mountpoint not a string object")

        try:
            if not util.ioretry(lambda: util.isdir(mountpoint)):
                util.ioretry(lambda: util.makedirs(mountpoint))
        except util.CommandException, inst:
            raise SMBException("Failed to make directory: code is %d" %
                    inst.code)

        # per-mount temp credentials file path
        self.credentials = os.path.join("/tmp", util.gen_uuid())

        options = self.getMountOptions()
        if options:
            options = ",".join(str(x) for x in options if x)

        try:
            util.ioretry(lambda: util.pread(["mount.cifs", self.remoteserver,
                    mountpoint, "-o", options]),
                    errlist=[errno.EPIPE, errno.EIO],
                    maxretry=2, nofail=True)
        except util.CommandException, inst:
            raise SMBException("mount failed with return code %d" % inst.code)
def clone(self, sr_uuid, vdi_uuid, dest):
    """Clone this VDI into 'dest' via a dual snapshot.

    The original file is renamed to a fresh uuid (newsrc), both children
    are produced from it via _dualsnap, and newsrc is marked hidden; if
    neither child references newsrc as its parent, newsrc is unlinked
    again.  Raises VDIClone (wrapping the command error code) on failure.
    """
    args = []
    args.append("vdi_clone")
    args.append(sr_uuid)
    args.append(vdi_uuid)
    args.append(dest)
    # Test the amount of actual disk space
    if ENFORCE_VIRT_ALLOC:
        self.sr._loadvdis()
        reserved = self.sr.virtual_allocation
        sr_size = self.sr._getsize()
        # Needs room for both children plus their metadata overhead.
        if (sr_size - reserved) < \
                ((self.size + VDI.VDIMetadataSize(SR.DEFAULT_TAP, self.size))*2):
            raise xs_errors.XenError('SRNoSpace')
    newuuid = util.gen_uuid()
    src = self.path
    dst = os.path.join(self.sr.path, "%s.%s" % (dest, self.vdi_type))
    newsrc = os.path.join(self.sr.path, "%s.%s" % (newuuid, self.vdi_type))
    if not self._checkpath(src):
        raise xs_errors.XenError('VDIUnavailable', \
              opterr='VDI %s unavailable %s' % (vdi_uuid, src))
    # wkcfix: multiphase
    util.start_log_entry(self.sr.path, self.path, args)
    # We assume the filehandle has been released
    try:
        try:
            util.ioretry(lambda: os.rename(src, newsrc))
        except util.CommandException as inst:
            # ENOENT is tolerated here; anything else aborts the clone.
            if inst.code != errno.ENOENT:
                self._clonecleanup(src, dst, newsrc)
                util.end_log_entry(self.sr.path, self.path, ["error"])
                raise
        try:
            util.ioretry(lambda: self._dualsnap(src, dst, newsrc))
            # mark the original file (in this case, its newsrc)
            # as hidden so that it does not show up in subsequent scans
            util.ioretry(lambda: self._mark_hidden(newsrc))
        except util.CommandException as inst:
            # EIO is tolerated here; anything else aborts the clone.
            if inst.code != errno.EIO:
                self._clonecleanup(src, dst, newsrc)
                util.end_log_entry(self.sr.path, self.path, ["error"])
                raise
        #Verify parent locator field of both children and delete newsrc if unused
        try:
            srcparent = util.ioretry(lambda: self._query_p_uuid(src))
            dstparent = util.ioretry(lambda: self._query_p_uuid(dst))
            if srcparent != newuuid and dstparent != newuuid:
                util.ioretry(lambda: os.unlink(newsrc))
        except:
            # Best-effort: a failed parent check leaves newsrc in place.
            pass
    except util.CommandException as inst:
        self._clonecleanup(src, dst, newsrc)
        util.end_log_entry(self.sr.path, self.path, ["error"])
        raise xs_errors.XenError('VDIClone',
                                 opterr='VDI clone failed error %d' % inst.code)
    util.end_log_entry(self.sr.path, self.path, ["done"])
def _checkmount(self):
    """Return whether the SR path exists and is an active mount point."""
    present = util.ioretry(lambda: util.pathexists(self.path))
    # Only probe the mount state when the path itself is there.
    return present and util.ioretry(lambda: util.ismount(self.path))
def ioretry(cmd):
    """Run *cmd* through util.pread2, retrying on transient I/O errors."""
    transient_errors = [errno.EIO, errno.EAGAIN]
    return util.ioretry(lambda: util.pread2(cmd), errlist=transient_errors)
# NOTE(review): fragment of a VDI-scan routine -- the enclosing 'def' is not
# part of this chunk.  Populates self.vdis from VHD files, raw images and
# CBT log files found under self.path.
if self.vdis:
    # Already scanned; keep the existing VDI map.
    return
pattern = os.path.join(self.path, "*%s" % vhdutil.FILE_EXTN_VHD)
try:
    self.vhds = vhdutil.getAllVHDs(pattern, FileVDI.extractUuid)
except util.CommandException, inst:
    raise xs_errors.XenError('SRScan', opterr="error VHD-scanning " \
          "path %s (%s)" % (self.path, inst))
for uuid in self.vhds.iterkeys():
    if self.vhds[uuid].error:
        raise xs_errors.XenError('SRScan', opterr='uuid=%s' % uuid)
    self.vdis[uuid] = self.vdi(uuid, True)
# raw VDIs and CBT log files
files = util.ioretry(lambda: util.listdir(self.path))
for fn in files:
    if fn.endswith(vhdutil.FILE_EXTN_RAW):
        # Strip the raw extension to recover the VDI uuid.
        uuid = fn[:-(len(vhdutil.FILE_EXTN_RAW))]
        self.vdis[uuid] = self.vdi(uuid, True)
    elif fn.endswith(CBTLOG_TAG):
        cbt_uuid = fn.split(".")[0]
        # If an associated disk exists, update CBT status
        # else create new VDI of type cbt_metadata
        if self.vdis.has_key(cbt_uuid):
            self.vdis[cbt_uuid].cbt_enabled = True
        else:
            new_vdi = self.vdi(cbt_uuid)
            new_vdi.ty = "cbt_metadata"
            new_vdi.cbt_enabled = True
            self.vdis[cbt_uuid] = new_vdi
def _rename(self, oldName, newName):
    """Best-effort rename of oldName to newName (retried via util.ioretry).

    Failures are deliberately not propagated to the caller; they are now
    logged instead of being swallowed silently, so a failed rename can be
    diagnosed from the SM log.
    """
    try:
        util.ioretry(lambda: os.rename(oldName, newName))
    except util.CommandException as inst:
        # Keep the original best-effort semantics, but leave a trace.
        util.SMlog("_rename: failed to rename %s to %s (error %d)"
                   % (oldName, newName, inst.code))
def _clonecleanup(self, src, dst, newsrc):
    """Best-effort cleanup after a failed clone: remove the src leaf.

    NOTE(review): dst and newsrc are accepted but not removed here --
    confirm against callers whether that is intentional.  Failures are
    deliberately not propagated; they are now logged rather than being
    swallowed silently.
    """
    try:
        util.ioretry(lambda: os.unlink(src))
    except util.CommandException as inst:
        # Cleanup stays best-effort, but leave a trace in the SM log.
        util.SMlog("_clonecleanup: failed to unlink %s (error %d)"
                   % (src, inst.code))
def _getlockstatus(self):
    """Return True iff the SR directory contains at least one entry
    recognised by self.match_locks; False otherwise."""
    if not util.ioretry(lambda: util.pathexists(self.sr.path)):
        return False
    entries = util.ioretry(lambda: util.listdir(self.sr.path))
    for entry in entries:
        if self.match_locks(entry):
            return True
    return False
# Set the target path temporarily to the base dir # so that we can create the target SR directory self.remotepath = self.dconf['serverpath'].encode('utf-8') try: self.mount_remotepath(sr_uuid) except Exception, exn: try: os.rmdir(self.path) except: pass raise exn if not self.nosubdir: newpath = os.path.join(self.path, sr_uuid) if util.ioretry(lambda: util.pathexists(newpath)): if len(util.ioretry(lambda: util.listdir(newpath))) != 0: self.detach(sr_uuid) raise xs_errors.XenError('SRExists') else: try: util.ioretry(lambda: util.makedirs(newpath)) except util.CommandException, inst: if inst.code != errno.EEXIST: self.detach(sr_uuid) raise xs_errors.XenError('NFSCreate', opterr='remote directory creation error is %d' % inst.code) self.detach(sr_uuid) def delete(self, sr_uuid):
def load(self, vdi_uuid):
    """Load VDI state: delegate the common loading to the parent class,
    then record the SR-level lock status for this VDI."""
    super(NFSVDI, self).load(vdi_uuid)
    # Lock probing touches the SR directory, hence the ioretry wrapper.
    lock_state = util.ioretry(lambda: self._getlockstatus())
    self.lockable = True
    self.locked = lock_state
class GlusterFSSR(FileSR.FileSR):
    """Gluster file-based storage repository"""

    def handles(sr_type):
        # fudge, because the parent class (FileSR) checks for smb to alter its behavior
        return sr_type == 'glusterfs' or sr_type == 'smb'
    handles = staticmethod(handles)

    def load(self, sr_uuid):
        # Initialise in-memory SR state from the device config; requires a
        # 'server' entry (host:volume -- the host part keys the mountpoint).
        self.ops_exclusive = FileSR.OPS_EXCLUSIVE
        self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
        self.sr_vditype = SR.DEFAULT_TAP
        self.driver_config = DRIVER_CONFIG
        if 'server' not in self.dconf:
            raise xs_errors.XenError('ConfigServerMissing')
        self.remoteserver = self.dconf['server']
        # Prefer the live sm_config from XenAPI; fall back to the value in
        # the command parameters (may legitimately be absent -> {}).
        if self.sr_ref and self.session is not None:
            self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
        else:
            self.sm_config = self.srcmd.params.get('sr_sm_config') or {}
        self.mountpoint = os.path.join(SR.MOUNT_BASE, 'GlusterFS',
                                       self.remoteserver.split(':')[0],
                                       sr_uuid)
        self.linkpath = os.path.join(self.mountpoint, sr_uuid or "")
        self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
        self._check_o_direct()

    def checkmount(self):
        # Mounted only if the mountpoint exists, is an active mount, and
        # the per-SR link path inside it exists.
        return util.ioretry(
            lambda: ((util.pathexists(self.mountpoint) and util.ismount(
                self.mountpoint)) and util.pathexists(self.linkpath)))

    def mount(self, mountpoint=None):
        """Mount the remote gluster export at 'mountpoint'"""
        if mountpoint is None:
            mountpoint = self.mountpoint
        elif not util.is_string(mountpoint) or mountpoint == "":
            raise GlusterFSException("mountpoint not a string object")
        # Create the mountpoint directory if it does not exist yet.
        try:
            if not util.ioretry(lambda: util.isdir(mountpoint)):
                util.ioretry(lambda: util.makedirs(mountpoint))
        except util.CommandException, inst:
            raise GlusterFSException("Failed to make directory: code is %d" %
                                     inst.code)
        try:
            options = []
            # Optional gluster mount tuning passed through device config.
            if 'backupservers' in self.dconf:
                options.append('backup-volfile-servers=' +
                               self.dconf['backupservers'])
            if 'fetchattempts' in self.dconf:
                options.append('fetch-attempts=' +
                               self.dconf['fetchattempts'])
            if options:
                options = ['-o', ','.join(options)]
            command = [
                "mount", '-t', 'glusterfs', self.remoteserver, mountpoint
            ] + options
            util.ioretry(lambda: util.pread(command),
                         errlist=[errno.EPIPE, errno.EIO],
                         maxretry=2, nofail=True)
        except util.CommandException, inst:
            # Record the failure in syslog before surfacing it.
            syslog(_syslog.LOG_ERR, 'GlusterFS mount failed ' + inst.__str__())
            raise GlusterFSException("mount failed with return code %d" % inst.code)
def _checkmount(self):
    """Return whether the SR path is usable: it must exist and either be
    an active mount point, or be backed by an existing remote path that
    is bind-mounted onto it."""
    def _probe():
        if not util.pathexists(self.path):
            return False
        if util.ismount(self.path):
            return True
        return util.pathexists(self.remotepath) and self._isbind()
    return util.ioretry(_probe)
def checkmount(self):
    """Return True iff the remote share is attached: the mountpoint must
    exist, be an active mount, and contain the SR link path."""
    def _attached():
        return (util.pathexists(self.mountpoint)
                and util.ismount(self.mountpoint)
                and util.pathexists(self.linkpath))
    return util.ioretry(_attached)
def load(self, vdi_uuid):
    # Determine the VDI type and on-disk path, then populate size /
    # utilisation / hidden / parent state, either from preloaded VHD scan
    # results (self.sr.vhds) or by querying the image directly.
    self.lock = self.sr.lock
    if self.sr.srcmd.cmd == "vdi_create":
        # For create, the type comes from vdi_sm_config (default VHD).
        self.vdi_type = vhdutil.VDI_TYPE_VHD
        if self.sr.srcmd.params.has_key("vdi_sm_config") and \
                self.sr.srcmd.params["vdi_sm_config"].has_key("type"):
            vdi_type = self.sr.srcmd.params["vdi_sm_config"]["type"]
            if not self.VDI_TYPE.get(vdi_type):
                raise xs_errors.XenError('VDIType',
                      opterr='Invalid VDI type %s' % vdi_type)
            self.vdi_type = self.VDI_TYPE[vdi_type]
        self.path = os.path.join(self.sr.path, "%s%s" % \
                (vdi_uuid, vhdutil.FILE_EXTN[self.vdi_type]))
    else:
        # Otherwise infer the type from what is on disk: prefer a VHD
        # file, fall back to RAW.
        vhd_path = os.path.join(self.sr.path, "%s.%s" % \
                (vdi_uuid, self.PARAM_VHD))
        if util.ioretry(lambda: util.pathexists(vhd_path)):
            self.vdi_type = vhdutil.VDI_TYPE_VHD
            self.path = vhd_path
        else:
            raw_path = os.path.join(self.sr.path, "%s.%s" % \
                    (vdi_uuid, self.PARAM_RAW))
            self.vdi_type = vhdutil.VDI_TYPE_RAW
            self.path = raw_path
    self.hidden = False
    if not util.ioretry(lambda: util.pathexists(self.path)):
        # vdi_attach_from_config tolerates a missing file at load time.
        if self.sr.srcmd.cmd == "vdi_attach_from_config":
            return
        raise xs_errors.XenError('VDIUnavailable',
                                 opterr="%s not found" % self.path)
    if self.vdi_type == vhdutil.VDI_TYPE_VHD and \
            self.sr.__dict__.get("vhds") and self.sr.vhds.get(vdi_uuid):
        # VHD info already preloaded: use it instead of querying directly
        vhdInfo = self.sr.vhds[vdi_uuid]
        self.utilisation = vhdInfo.sizePhys
        self.size = vhdInfo.sizeVirt
        self.hidden = vhdInfo.hidden
        if self.hidden:
            self.managed = False
        self.parent = vhdInfo.parentUuid
        if self.parent:
            self.sm_config_override = {'vhd-parent': self.parent}
        else:
            self.sm_config_override = {'vhd-parent': None}
        return
    try:
        # Change to the SR directory in case parent
        # locator field path has changed
        os.chdir(self.sr.path)
    except:
        raise xs_errors.XenError('SRUnavailable')
    if util.ioretry(lambda: util.pathexists(self.path)):
        try:
            st = util.ioretry(lambda: os.stat(self.path))
            self.utilisation = long(st.st_size)
        except util.CommandException, inst:
            if inst.code == errno.EIO:
                raise xs_errors.XenError('VDILoad', \
                      opterr='Failed load VDI information %s' % self.path)
            else:
                raise xs_errors.XenError('VDIType', \
                      opterr='Invalid VDI type %s' % self.vdi_type)
        if self.vdi_type == vhdutil.VDI_TYPE_RAW:
            # RAW image: virtual size equals the file size; nothing more
            # to query.
            self.exists = True
            self.size = self.utilisation
            self.sm_config_override = {'type': self.PARAM_RAW}
            return
        try:
            diskinfo = util.ioretry(lambda: self._query_info(self.path))
            if diskinfo.has_key('parent'):
                self.parent = diskinfo['parent']
                self.sm_config_override = {'vhd-parent': self.parent}
            else:
                self.sm_config_override = {'vhd-parent': None}
                self.parent = ''
            # 'size' is reported in MiB; convert to bytes.
            self.size = long(diskinfo['size']) * 1024 * 1024
            self.hidden = long(diskinfo['hidden'])
            if self.hidden:
                self.managed = False
            self.exists = True
        except util.CommandException, inst:
            raise xs_errors.XenError('VDILoad', \
                  opterr='Failed load VDI information %s' % self.path)
# NOTE(review): fragment of GlusterFSSR.attach -- the enclosing 'def' is not
# part of this chunk, and the final SROSError call is truncated ("116,").
if self.checkmount():
    raise SR.SROSError(113, 'GlusterFS mount point already attached')
try:
    self.mount()
except GlusterFSException, exc:
    # Mount failed: remove the mountpoint directory (best effort), then
    # surface the mount error.
    # noinspection PyBroadException
    try:
        os.rmdir(self.mountpoint)
    except:
        # we have no recovery strategy
        pass
    raise SR.SROSError(
        111, "GlusterFS mount error [opterr=%s]" % exc.errstr)
if util.ioretry(lambda: util.pathexists(self.linkpath)):
    # Refuse to attach over a non-empty link path.
    if len(util.ioretry(lambda: util.listdir(self.linkpath))) != 0:
        self.detach(sr_uuid)
        raise xs_errors.XenError('SRExists')
else:
    try:
        util.ioretry(lambda: util.makedirs(self.linkpath))
        os.symlink(self.linkpath, self.path)
    except util.CommandException, inst:
        # EEXIST is benign; anything else unmounts (best effort) and
        # reports the failure.
        if inst.code != errno.EEXIST:
            try:
                self.unmount(self.mountpoint, True)
            except GlusterFSException:
                util.logException('GlusterFSSR.unmount()')
            raise SR.SROSError(
                116,
# NOTE(review): fragment of an NFS mount helper -- the enclosing 'def' is
# not part of this chunk, and the body of unmount's final 'if' is truncated.
mountcommand = 'mount.nfs4'
if timeout < 1:
    timeout = SOFTMOUNT_TIMEOUT
# nfs 'timeo' is expressed in tenths of a second, hence timeout * 10.
options = "soft,timeo=%d,retrans=%d,proto=%s,vers=%s" % (
    timeout * 10, SOFTMOUNT_RETRANS, transport, nfsversion)
# Disable directory attribute caching.
options += ',acdirmin=0,acdirmax=0'
if useroptions != '':
    options += ",%s" % useroptions
try:
    util.ioretry(lambda: util.pread([
        mountcommand, "%s:%s" % (remoteserver, remotepath),
        mountpoint, "-o", options
    ]), errlist=[errno.EPIPE, errno.EIO], maxretry=2, nofail=True)
except util.CommandException, inst:
    raise NfsException("mount failed with return code %d" % inst.code)


def unmount(mountpoint, rmmountpoint):
    """Unmount the mounted mountpoint"""
    try:
        util.pread(["umount", mountpoint])
    except util.CommandException, inst:
        raise NfsException("umount failed with return code %d" % inst.code)
    if rmmountpoint:
def load(self, vdi_uuid):
    # Determine the VDI type and on-disk path, then populate size /
    # utilisation / hidden / parent state, either from preloaded VHD scan
    # results (self.sr.vhds) or by querying the image directly.
    self.lock = self.sr.lock
    self.sr.srcmd.params['o_direct'] = self.sr.o_direct
    if self.sr.srcmd.cmd == "vdi_create":
        # For create, the type comes from vdi_sm_config (default VHD).
        self.vdi_type = vhdutil.VDI_TYPE_VHD
        if self.sr.srcmd.params.has_key("vdi_sm_config") and \
                self.sr.srcmd.params["vdi_sm_config"].has_key("type"):
            vdi_type = self.sr.srcmd.params["vdi_sm_config"]["type"]
            if not self.VDI_TYPE.get(vdi_type):
                raise xs_errors.XenError('VDIType',
                      opterr='Invalid VDI type %s' % vdi_type)
            self.vdi_type = self.VDI_TYPE[vdi_type]
        self.path = os.path.join(self.sr.path, "%s%s" % \
                (vdi_uuid, vhdutil.FILE_EXTN[self.vdi_type]))
    else:
        found = self._find_path_with_retries(vdi_uuid)
        if not found:
            if self.sr.srcmd.cmd == "vdi_delete":
                # Could be delete for CBT log file
                self.path = os.path.join(
                    self.sr.path,
                    "%s.%s" % (vdi_uuid, self.PARAM_VHD))
                return
            # vdi_attach_from_config tolerates a missing file at load time.
            if self.sr.srcmd.cmd == "vdi_attach_from_config":
                return
            raise xs_errors.XenError('VDIUnavailable',
                                     opterr="VDI %s not found" % vdi_uuid)
    if self.vdi_type == vhdutil.VDI_TYPE_VHD and \
            self.sr.__dict__.get("vhds") and self.sr.vhds.get(vdi_uuid):
        # VHD info already preloaded: use it instead of querying directly
        vhdInfo = self.sr.vhds[vdi_uuid]
        self.utilisation = vhdInfo.sizePhys
        self.size = vhdInfo.sizeVirt
        self.hidden = vhdInfo.hidden
        if self.hidden:
            self.managed = False
        self.parent = vhdInfo.parentUuid
        if self.parent:
            self.sm_config_override = {'vhd-parent': self.parent}
        else:
            self.sm_config_override = {'vhd-parent': None}
        return
    try:
        # Change to the SR directory in case parent
        # locator field path has changed
        os.chdir(self.sr.path)
    except Exception as chdir_exception:
        util.SMlog("Unable to change to SR directory, SR unavailable, %s" %
                   str(chdir_exception))
        raise xs_errors.XenError('SRUnavailable',
                                 opterr=str(chdir_exception))
    if util.ioretry(lambda: util.pathexists(self.path),
                    errlist=[errno.EIO, errno.ENOENT]):
        try:
            st = util.ioretry(lambda: os.stat(self.path),
                              errlist=[errno.EIO, errno.ENOENT])
            self.utilisation = long(st.st_size)
        except util.CommandException, inst:
            if inst.code == errno.EIO:
                raise xs_errors.XenError('VDILoad', \
                      opterr='Failed load VDI information %s' % self.path)
            else:
                util.SMlog("Stat failed for %s, %s" %
                           (self.path, str(inst)))
                raise xs_errors.XenError('VDIType', \
                      opterr='Invalid VDI type %s' % self.vdi_type)
        if self.vdi_type == vhdutil.VDI_TYPE_RAW:
            # RAW image: virtual size equals the file size; nothing more
            # to query.
            self.exists = True
            self.size = self.utilisation
            self.sm_config_override = {'type': self.PARAM_RAW}
            return
        if self.vdi_type == CBTLOG_TAG:
            # CBT metadata file: no VHD header to query.
            self.exists = True
            self.size = self.utilisation
            return
        try:
            # The VDI might be activated in R/W mode so the VHD footer
            # won't be valid, use the back-up one instead.
            diskinfo = util.ioretry(
                lambda: self._query_info(self.path, True),
                errlist=[errno.EIO, errno.ENOENT])
            if diskinfo.has_key('parent'):
                self.parent = diskinfo['parent']
                self.sm_config_override = {'vhd-parent': self.parent}
            else:
                self.sm_config_override = {'vhd-parent': None}
                self.parent = ''
            # 'size' is reported in MiB; convert to bytes.
            self.size = long(diskinfo['size']) * 1024 * 1024
            self.hidden = long(diskinfo['hidden'])
            if self.hidden:
                self.managed = False
            self.exists = True
        except util.CommandException, inst:
            raise xs_errors.XenError('VDILoad', \
                  opterr='Failed load VDI information %s' % self.path)