def _getVMs(self):
    """Return a (master dom0 VM UUID, slave dom0 VM UUID) pair.

    The slave is the dom0 of any pool host other than the master; when
    this host is the master and no other host exists, the second element
    is None.  Raises SMException on malformed CLI output.
    """
    master = self.getMasterUUID()
    cmd = "xe vm-list dom-id=0 resident-on=%s params=uuid --minimal" % \
            master
    masterVM = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD + 1).strip()
    if not tutil.validateUUID(masterVM):
        raise SMException("Got invalid UUID: %s" % masterVM)
    host = self.getThisHost()
    if host == master:
        # Running on the master: pick any other pool host as the slave.
        out = tutil.execCmd("xe host-list params=uuid --minimal", 0,
                self.logger, LOG_LEVEL_CMD + 1)
        others = [h for h in out.strip().split(",") if h != master]
        if not others:
            # Single-host pool: there is no slave dom0 to report.
            return (masterVM, None)
        host = others[0]
    cmd = "xe vm-list dom-id=0 resident-on=%s params=uuid --minimal" % host
    slaveVM = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD + 1).strip()
    if not tutil.validateUUID(slaveVM):
        raise SMException("Got invalid UUID: %s" % slaveVM)
    return (masterVM, slaveVM)
def _getPoolUUID(self):
    """Return the UUID of the pool; raise SMException if the CLI output
    is not a valid UUID."""
    out = tutil.execCmd("xe pool-list params=uuid --minimal", 0,
            self.logger, LOG_LEVEL_CMD)
    uuid = out.strip()
    if tutil.validateUUID(uuid):
        return uuid
    raise SMException("Got invalid UUID: %s" % uuid)
def _createVBD(self, vdi, vm, ro = False, vbdLetter = None,
        unpluggable = True):
    """Creates a VBD for the specified VDI on the specified VM.

    If a device is not supplied (vbdLetter), the first available one is
    used.  Raises SMException when no device slot is available or when
    the CLI output is not a valid UUID.  Returns the UUID of the VBD."""
    mode = "rw"
    if ro:
        mode = "ro"
    if None == vbdLetter:
        devices = self._vm_get_allowed_vbd_devices(vm)
        # Was "assert len(devices) > 0": asserts disappear under
        # "python -O" and an AssertionError carries no context, so raise
        # the module's own exception instead (resolves the old FIXME).
        if not devices:
            raise SMException("No allowed VBD devices left on VM %s" % vm)
        vbdLetter = devices[0]
    cmd = "xe vbd-create vm-uuid=%s vdi-uuid=%s type=disk mode=%s "\
            "device=%s unpluggable=%s" % (vm, vdi, mode, vbdLetter,
            unpluggable)
    # XXX xe vbd-create returns 0 even if the device name is invalid
    stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD)
    if not stdout:
        raise SMException("No output from vbd-create")
    vbdUuid = stdout.strip()
    if not tutil.validateUUID(vbdUuid):
        raise SMException("Got invalid UUID: %s" % vbdUuid)
    return vbdUuid
def _getDefaultSR(self):
    """Return the pool's default-SR UUID, or None when it is unset or
    the CLI output is not a valid UUID."""
    cmd = "xe pool-param-get param-name=default-SR uuid=%s --minimal" % \
            self._getPoolUUID()
    uuid = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD).strip()
    if not tutil.validateUUID(uuid):
        return None
    return uuid
def _getDom0UUID(self):
    """Return the UUID of this host's dom0 VM; raise SMException on
    empty or malformed CLI output."""
    out = tutil.execCmd("xe vm-list dom-id=0 params=uuid --minimal", 0,
            self.logger, LOG_LEVEL_CMD)
    if not out:
        raise SMException("No output from vm-list")
    uuid = out.strip()
    if not tutil.validateUUID(uuid):
        raise SMException("Got invalid UUID: %s" % uuid)
    return uuid
def _createSR(self, type, size):
    """Create an SR of the given type and physical size (bytes) on
    self.targetDevice; return the new SR's UUID."""
    cmd = "xe sr-create name-label='%s' physical-size=%d " \
            "content-type=user device-config:device=%s type=%s" % \
            (self.SR_LABEL, size, self.targetDevice, type)
    out = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD)
    if not out:
        raise SMException("No output from sr-create")
    srUuid = out.strip()
    if not tutil.validateUUID(srUuid):
        raise SMException("Got invalid UUID: %s" % srUuid)
    return srUuid
def _createVBD(self, vdi, vbdLetter, ro, vm):
    """Create a VBD attaching the VDI to the VM as device xvd<letter>;
    return the new VBD's UUID."""
    mode = "ro" if ro else "rw"
    cmd = "xe vbd-create vm-uuid=%s vdi-uuid=%s type=disk mode=%s "\
            "device=xvd%s unpluggable=true" % (vm, vdi, mode, vbdLetter)
    out = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD)
    if not out:
        raise SMException("No output from vbd-create")
    vbdUuid = out.strip()
    if not tutil.validateUUID(vbdUuid):
        raise SMException("Got invalid UUID: %s" % vbdUuid)
    return vbdUuid
def _createVDI(self, sr, size, name, raw):
    """Create a user VDI on the given SR (raw or VHD format, per the
    'raw' flag); return the new VDI's UUID."""
    fmt = "raw" if raw else "vhd"
    cmd = "xe vdi-create sr-uuid=%s name-label='%s' " \
            "type=user virtual-size=%d" % (sr, name, size)
    cmd += " sm-config:type=%s" % fmt
    out = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD)
    if not out:
        raise SMException("No output from vdi-create")
    vdiUuid = out.strip()
    if not tutil.validateUUID(vdiUuid):
        raise SMException("Got invalid UUID: %s" % vdiUuid)
    return vdiUuid
def _cloneVDI(self, vdi):
    """Clone the specified VDI, retrying while xapi reports VDI_IN_USE;
    return the clone's UUID.

    Raises SMException when the retries are exhausted or the CLI output
    is empty/malformed; any CommandException other than VDI_IN_USE is
    re-raised immediately."""
    cmd = "xe vdi-clone uuid=%s" % vdi
    stdout = None
    for i in range(CMD_NUM_RETRIES):
        try:
            stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD)
            break
        except tutil.CommandException as inst:
            # Only VDI_IN_USE is transient; anything else is fatal.
            if str(inst).find("VDI_IN_USE") == -1:
                raise
            self.logger.log("Command failed, retrying", LOG_LEVEL_CMD)
            time.sleep(CMD_RETRY_PERIOD)
    else:
        # Previously the loop fell through here with 'stdout' unbound,
        # producing a NameError instead of a meaningful error.
        raise SMException("vdi-clone still failing after %d retries" %
                CMD_NUM_RETRIES)
    if not stdout:
        # Fixed: the message used to say "vdi-snapshot".
        raise SMException("No output from vdi-clone")
    cloneUuid = stdout.strip()
    if not tutil.validateUUID(cloneUuid):
        raise SMException("Got invalid UUID: %s" % cloneUuid)
    return cloneUuid
def _createVDI(self, sr, size = 2**30, name = None, raw = False):
    """Creates a VDI on the specified SR using the specified size and
    name.  The raw argument controls whether the VDI to be created must
    be of raw or VHD format.  Returns the UUID of the created VDI."""
    if name is None:
        # No label supplied: make up a short random hex one.
        name = ''.join(random.choice(string.hexdigits) for _ in range(3))
    cmd = "xe vdi-create sr-uuid=%s name-label='%s' " \
            "type=user virtual-size=%d name-description=test-vdi" % \
            (sr, name, size)
    cmd += " sm-config:type=%s" % ("raw" if raw else "vhd")
    out = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD)
    if not out:
        raise SMException("No output from vdi-create")
    vdiUuid = out.strip()
    if not tutil.validateUUID(vdiUuid):
        raise SMException("Got invalid UUID: %s" % vdiUuid)
    return vdiUuid
def _snapshotVDI(self, vdi, single=None):
    """Snapshot the specified VDI, retrying while xapi reports
    VDI_IN_USE; return the snapshot's UUID.

    single: when truthy, pass driver-params:type=single to the snapshot.
    Raises SMException when the retries are exhausted or the CLI output
    is empty/malformed; any CommandException other than VDI_IN_USE is
    re-raised immediately."""
    cmd = "xe vdi-snapshot uuid=%s" % vdi
    if single:
        cmd += " driver-params:type=single"
    stdout = None
    for i in range(CMD_NUM_RETRIES):
        try:
            stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD)
            break
        except tutil.CommandException as inst:
            # Only VDI_IN_USE is transient; anything else is fatal.
            if str(inst).find("VDI_IN_USE") == -1:
                raise
            self.logger.log("Command failed, retrying", LOG_LEVEL_CMD)
            time.sleep(CMD_RETRY_PERIOD)
    else:
        # Previously the loop fell through here with 'stdout' unbound,
        # producing a NameError instead of a meaningful error.
        raise SMException("vdi-snapshot still failing after %d retries" %
                CMD_NUM_RETRIES)
    if not stdout:
        raise SMException("No output from vdi-snapshot")
    snapUuid = stdout.strip()
    if not tutil.validateUUID(snapUuid):
        raise SMException("Got invalid UUID: %s" % snapUuid)
    return snapUuid
class StorageManagerCLI(StorageManager): "Manage local XenServer storage through CLI" def __init__(self, logger): StorageManager.__init__(self, logger) def _getMasterUUID(self): cmd = "xe pool-list params=master --minimal" stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_SUB) master = stdout.strip() return master def _getDom0UUID(self): cmd = "xe vm-list dom-id=0 params=uuid --minimal" stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) if not stdout: raise SMException("No output from vm-list") dom0uuid = stdout.strip() if not tutil.validateUUID(dom0uuid): raise SMException("Got invalid UUID: %s" % dom0uuid) return dom0uuid def _getPoolUUID(self): cmd = "xe pool-list params=uuid --minimal" stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) poolUuid = stdout.strip() if not tutil.validateUUID(poolUuid): raise SMException("Got invalid UUID: %s" % poolUuid) return poolUuid def _getDefaultSR(self): """Retrieves the UUID of the default SR of the pool.""" cmd = "xe pool-param-get param-name=default-SR uuid=%s --minimal" % \ self._getPoolUUID() stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) uuid = stdout.strip() if tutil.validateUUID(uuid): return uuid return None def _getInfoSR(self, uuid): """Retrieves the parameters of the specified SR in the form of a dictionary.""" cmd = "xe sr-list uuid=%s params=all" % uuid stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD + 1) info = self._toDict(stdout) if not info["sm-config"]: info["sm-config"] = dict() return info def _refreshSR(self, sr): """"Refreshes" the list of SRs by performing an SR scan.""" # TODO What is the use of the refresh? 
cmd = "xe sr-scan uuid=%s" % sr tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD + 1) def _getInfoPBD(self, uuid): cmd = "xe pbd-list uuid=%s params=all" % uuid stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD + 1) return self._toDict(stdout) def _getInfoVDI(self, uuid): """Retrieves the parameters of the spcified VDI in the form of a dictionary.""" cmd = "xe vdi-list uuid=%s params=all" % uuid stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD + 1) return self._toDict(stdout) def _createSR(self, type, size): cmd = "xe sr-create name-label='%s' physical-size=%d \ content-type=user device-config:device=%s type=%s" % \ (self.SR_LABEL, size, self.targetDevice, type) stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) if not stdout: raise SMException("No output from sr-create") srUuid = stdout.strip() if not tutil.validateUUID(srUuid): raise SMException("Got invalid UUID: %s" % srUuid) return srUuid def _getParam(self, obj, uuid, param): """Retrieves the specified parameter(s) of the specified object. Arguments: object: An object can be anything that would render the "<obj>-list" string valid for passing it as an argument to a "xe" command, e.g. sr-list, vdi-list etc. The uuid is the UUID of the desired object. uuid: The UUID of the specified object. param: The desired parameter, e.g. all, allow-caching, etc.""" cmd = "xe %s-list uuid=%s params=%s" % (obj, uuid, param) return tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_SUB) def _setParam(self, obj, uuid, param, val, set = True): cmd = "xe %s-param-set uuid=%s %s=%s" % (obj, uuid, param, val) if not set: cmd = "xe %s-param-clear uuid=%s param-name=%s" % (obj, uuid, param) tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) def _probe(self, type, dev): cmd = "xe sr-probe type=%s device-config:device=%s" % (type, dev) stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) return stdout def _findSR(self, type): "Retrieves all SRs of the specified type in the form of a list." 
cmd = "xe sr-list type=%s --minimal" % type stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) stdout = stdout.strip() if not stdout: return [] srList = stdout.split(",") return srList def _findThisPBD(self, sr): """Retrieves the UUID of the PDB of the specified SR.""" cmd = "xe pbd-list sr-uuid=%s host-uuid=%s params=uuid --minimal" % \ (sr, self.getThisHost()) stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) return stdout.strip() def _plugSR(self, sr): cmd = "xe pbd-list sr-uuid=%s params=uuid --minimal" % sr stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) for uuid in stdout.split(","): cmd = "xe pbd-plug uuid=%s" % uuid tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) def _unplugSR(self, sr): cmd = "xe pbd-list sr-uuid=%s params=uuid --minimal" % sr stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) for uuid in stdout.split(","): cmd = "xe pbd-unplug uuid=%s" % uuid tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) def _getVDIs(self, sr): """Retrieves the UUIDs of all the VDIs on the specified SR, in the form of a list.""" cmd = "xe vdi-list sr-uuid=%s params=uuid --minimal" % sr stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_SUB) stdout = stdout.strip() if not stdout: return [] vdiList = stdout.split(",") return vdiList def _getLeafVDIs(self, sr): """Retrieves the UUIDs of the leaf VDIs on the specified SR in the form of a list.""" vdiList = self.getVDIs(sr) cmd = "xe vdi-list sr-uuid=%s name-label='base copy' params=uuid --minimal" % sr stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_SUB) stdout = stdout.strip() if not stdout: baseList = [] baseList = stdout.split(",") # TODO Which VDIs does the name-label='base copy' return? leafList = [] for vdi in vdiList: if vdi not in baseList: leafList.append(vdi) return leafList # TODO Make name an optional parameter where if not specified, a random # one should be generated. XXX "a random one" what? 
def _createVDI(self, sr, size = 2**30, name = None, raw = False): """Creates a VDI on the specified SR using the specified size and name. The raw argument controls whether the VDI to be created must be of raw or VHD format. Returns the UUID of the created VDI.""" if None == name: name = ''.join(random.choice(string.hexdigits) for x in range(3)) cmd = "xe vdi-create sr-uuid=%s name-label='%s' \ type=user virtual-size=%d name-description=test-vdi" \ % (sr, name, size) if raw: cmd += " sm-config:type=raw" else: cmd += " sm-config:type=vhd" stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) if not stdout: raise SMException("No output from vdi-create") vdiUuid = stdout.strip() if not tutil.validateUUID(vdiUuid): raise SMException("Got invalid UUID: %s" % vdiUuid) return vdiUuid def _cloneVDI(self, vdi): cmd = "xe vdi-clone uuid=%s" % vdi for i in range(CMD_NUM_RETRIES): try: stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) break except tutil.CommandException, inst: if str(inst).find("VDI_IN_USE") != -1: self.logger.log("Command failed, retrying", LOG_LEVEL_CMD) time.sleep(CMD_RETRY_PERIOD) else: raise if not stdout: raise SMException("No output from vdi-snapshot") cloneUuid = stdout.strip() if not tutil.validateUUID(cloneUuid): raise SMException("Got invalid UUID: %s" % cloneUuid) return cloneUuid
cmd += " driver-params:type=single" for i in range(CMD_NUM_RETRIES): try: stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) break except tutil.CommandException, inst: if str(inst).find("VDI_IN_USE") != -1: self.logger.log("Command failed, retrying", LOG_LEVEL_CMD) time.sleep(CMD_RETRY_PERIOD) else: raise if not stdout: raise SMException("No output from vdi-snapshot") snapUuid = stdout.strip() if not tutil.validateUUID(snapUuid): raise SMException("Got invalid UUID: %s" % snapUuid) return snapUuid def _resizeVDI(self, vdi, size, live): cmd = "xe vdi-resize uuid=%s disk-size=%d" % (vdi, size) if live: cmd += " online=true" tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) def _getInfoVBD(self, uuid): cmd = "xe vbd-list uuid=%s params=all" % uuid stdout = tutil.execCmd(cmd, 0, self.logger, 4) return self._toDict(stdout) def _createVBD(self, vdi, vm, ro = False, vbdLetter = None,
def main(argv): vm_uuid = None # UUID of the VM inside which biotest will run sr_uuid = None # UUID of the SR where the VDI will be created # total time, in seconds, for the test (includes time needed for the VM # to reboot) ttl = 180 ttr = 60 # timeout for the VM to restart size_gb = 1 # VDI size in GiB mbytes = 32 # number of MiB to be written by biotest caching_prob = .66 persistence_prob = .66 # Controls how much time will be spent performing plug-biotest-unplug # operations (after the cache/persistence mode has been selected). worktime = float(ttl) / 6 # Seed to use for the random number generator. Use the same seed across # runs to ensure consistent behavior. seed = None assert mbytes <= size_gb * (2**10) verbose = False # Parse the arguments. for arg in argv[1:]: fields = arg.split('=') if 'vm-uuid' == fields[0]: vm_uuid = fields[1] elif 'sr-uuid' == fields[0]: sr_uuid = fields[1] elif 'ttl' == fields[0]: ttl = int(fields[1]) elif 'ttr' == fields[0]: ttr = int(fields[1]) elif 'size' == fields[0]: size_gb = int(fields[1]) elif 'caching-prob' == fields[0]: caching_prob = float(fields[1]) assert caching_prob >= 0 and caching_prob <= 1 # FIXME elif 'persistence-prob' == fields[0]: persistence_prob = float(fields[1]) assert persistence_prob >= 0 and persistence_prob <= 1 # FIXME elif 'seed' == fields[0]: seed = fields[1] random.seed(seed) elif 'worktime' == fields[0]: worktime = int(fields[1]) elif '-v' == fields[0] or '--verbose' == fields[0]: verbose = True else: print 'invalid key \'' + fields[0] + '\'' return 2 logger.logger = tutil.Logger('/tmp/test_intellicache.log', 2) sm = storagemanager.StorageManager.getInstance(logger.logger) if None == vm_uuid: # If no VM has been specified, pick any one. # TODO not implemented print 'no VM specified' return os.EX_CONFIG if None == sr_uuid: # If no SR has been specified, pick one that has caching enabled. print 'no SR specified' # TODO not implemented return os.EX_CONFIG # Ensure the SR has caching enabled. 
sr_params = sm._getInfoSR(sr_uuid) if 'nfs' != sr_params['type'] or 'true' != sr_params['shared']: return os.EX_CONFIG # FIXME is the following necessary? #assert 'true' == sr_params['local-cache-enabled'] # Ensure caching has been enabled on the host and that there is a SR that # acts as a cache. local_cache_sr = sm._host_get_local_cache_sr() if not tutil.validateUUID(local_cache_sr): print 'caching not enabled' sys.exit(os.EX_CONFIG) # FIXME other power states not taken into account if not sm._vm_is_running(vm_uuid): sm._vm_start(vm_uuid) time.sleep(ttr) # Put the binaries in the VM. vm_ip = tutil.vm_get_ip(vm_uuid) assert None != vm_ip tutil.scp(vm_ip, '../biotest', '/var/tmp/biotest') # Create a VDI on the specified SR. vdi_uuid = sm._createVDI(sr_uuid, size_gb * (2**30)) if verbose: print 'test VDI is ' + vdi_uuid # The original VHD file that backs the VDI on the NFS SR. vdi_file = '/var/run/sr-mount/' + sr_uuid + '/' + vdi_uuid + '.vhd' # The VHD file on the local SR that backs the remote VDI. cache_file = '/var/run/sr-mount/' + local_cache_sr + '/' + vdi_uuid + \ '.vhdcache' # Snapshot the VDI so it can be cached. vdi_snapshot_uuid = sm._snapshotVDI(vdi_uuid) # Create a VDB for it. vbd_uuid = sm._createVBD(vdi_uuid, vm_uuid) cache_on = False first_iter = True endt = time.time() + ttl # stats stats_cached = 0 stats_total = 0 stats_plug_unplug_loops = 0 stats_persistent = 0 while time.time() < endt: # In the beginning of each iteration the VDI is expected to be # detached. # # Enable/disable caching on the VDI. vm_shutdown = False if caching_prob >= random.random(): # Select persistence mode. 
persistent = (persistence_prob >= random.random()) if not cache_on: # enable only if not already enabled sm._vm_shutdown(vm_uuid) sm._vdi_enable_caching(vdi_uuid, persistent) cache_on = True vm_shutdown = True elif sm._vdi_cache_persistence(vdi_uuid) != persistent: sm._vm_shutdown(vm_uuid) sm._vdi_cache_persistence(vdi_uuid, persistent) vm_shutdown = True else: # disable caching if cache_on: # If not already disabled, we need to shut down the VM first. sm._vm_shutdown(vm_uuid) sm._vdi_disable_caching(vdi_uuid) cache_on = False vm_shutdown = True # Ensure that the cache file is gone. assert not os.path.exists(cache_file) if verbose: print 'cache ' + str(cache_on), if cache_on: print ', persistent ' + str(persistent) else: print # If the VM was restarted, it's IP address may have changed. if True == vm_shutdown: sm._vm_start(vm_uuid) # We must wait for the VM to boot. time.sleep(ttr) sm._unplugVBD(vbd_uuid) vm_ip = tutil.vm_get_ip(vm_uuid) # Check existing data. if first_iter: first_iter = False else: sm._plugVBD(vbd_uuid) dev = '/dev/' + sm._vbd_get_bdev(vbd_uuid) tutil.ssh(vm_ip, '/var/tmp/biotest -t ' + dev + ' -m ' + \ str(mbytes) + ' -v') sm._unplugVBD(vbd_uuid) endt2 = time.time() + worktime stats_prev_plug_unplug_loops = stats_plug_unplug_loops while time.time() < endt2: # Store the old size of the cache file so we can ensure data are # actually written to the cache (as a sanity check). The first # loop must be skipped because no data have been written yet. if cache_on: if stats_prev_plug_unplug_loops > stats_plug_unplug_loops: cache_file_prev_size = os.path.getsize(cache_file) else: cache_file_prev_size = 0 # FIXME check that VDI grows, or, if in non-persistent mode, that # it doesn't grow vdi_file_prev_size = os.path.getsize(vdi_file) sm._plugVBD(vbd_uuid) dev = '/dev/' + sm._vbd_get_bdev(vbd_uuid) # XXX timeout based on volume size. Preliminary tests show that the # average speed for volumes larger than 64 MB is 2 MB/s, let's # assume .5 MB/s. 
tutil.ssh(vm_ip, '/var/tmp/biotest -t ' + dev + ' -m ' + \ str(mbytes), timeout = 2 * mbytes) stats_plug_unplug_loops += 1 if cache_on: # Ensure that the cache file has grown. assert os.path.getsize(cache_file) >= cache_file_prev_size new_size = os.path.getsize(vdi_file) if persistent: # Persistent cache mode: ensure the original VDI has grown. if new_size < vdi_file_prev_size: print 'new VDI file size (' + str(new_size) \ + ') should be bigger than old one (' \ + str(vdi_file_prev_size) + ')' assert False else: # Reset cache mode: no writes should reach the VDI on the # NFS SR if not new_size == vdi_file_prev_size: print 'VDI on the shared SR has been modified whilst in reset mode, old size ' + str( vdi_file_prev_size) + ', new size ' + str(new_size) sm._unplugVBD(vbd_uuid) # Update stats. assert stats_plug_unplug_loops >= stats_prev_plug_unplug_loops if stats_plug_unplug_loops > stats_prev_plug_unplug_loops: stats_total += 1 if cache_on: stats_cached += 1 if persistent: stats_persistent += 1 sm._destroyVBD(vbd_uuid) sm._destroyVDI(vdi_snapshot_uuid) sm._destroyVDI(vdi_uuid) print 'total ' + str(stats_total) + ', cached ' + str(stats_cached) \ + ', plug/unplug loops ' + str(stats_plug_unplug_loops) + \ ', persistent ' + str(stats_persistent) return 0
def main(argv): vm_uuid = None # UUID of the VM inside which biotest will run sr_uuid = None # UUID of the SR where the VDI will be created # total time, in seconds, for the test (includes time needed for the VM # to reboot) ttl = 180 ttr = 60 # timeout for the VM to restart size_gb = 1 # VDI size in GiB mbytes = 32 # number of MiB to be written by biotest caching_prob = .66 persistence_prob = .66 # Controls how much time will be spent performing plug-biotest-unplug # operations (after the cache/persistence mode has been selected). worktime = float(ttl) / 6 # Seed to use for the random number generator. Use the same seed across # runs to ensure consistent behavior. seed = None assert mbytes <= size_gb * (2**10) verbose = False # Parse the arguments. for arg in argv[1:]: fields = arg.split('=') if 'vm-uuid' == fields[0]: vm_uuid = fields[1] elif 'sr-uuid' == fields[0]: sr_uuid = fields[1] elif 'ttl' == fields[0]: ttl = int(fields[1]) elif 'ttr'== fields[0]: ttr = int(fields[1]) elif 'size' == fields[0]: size_gb =int(fields[1]) elif 'caching-prob' == fields[0]: caching_prob = float(fields[1]) assert caching_prob >= 0 and caching_prob <= 1 # FIXME elif 'persistence-prob' == fields[0]: persistence_prob = float(fields[1]) assert persistence_prob >= 0 and persistence_prob <= 1 # FIXME elif 'seed' == fields[0]: seed = fields[1] random.seed(seed) elif 'worktime' == fields[0]: worktime = int(fields[1]) elif '-v' == fields[0] or '--verbose' == fields[0]: verbose = True else: print 'invalid key \'' + fields[0] + '\'' return 2 logger.logger = tutil.Logger('/tmp/test_intellicache.log', 2) sm = storagemanager.StorageManager.getInstance(logger.logger) if None == vm_uuid: # If no VM has been specified, pick any one. # TODO not implemented print 'no VM specified' return os.EX_CONFIG if None == sr_uuid: # If no SR has been specified, pick one that has caching enabled. print 'no SR specified' # TODO not implemented return os.EX_CONFIG # Ensure the SR has caching enabled. 
sr_params = sm._getInfoSR(sr_uuid) if 'nfs'!= sr_params['type'] or 'true' != sr_params['shared']: return os.EX_CONFIG # FIXME is the following necessary? #assert 'true' == sr_params['local-cache-enabled'] # Ensure caching has been enabled on the host and that there is a SR that # acts as a cache. local_cache_sr = sm._host_get_local_cache_sr() if not tutil.validateUUID(local_cache_sr): print 'caching not enabled' sys.exit(os.EX_CONFIG) # FIXME other power states not taken into account if not sm._vm_is_running(vm_uuid): sm._vm_start(vm_uuid) time.sleep(ttr) # Put the binaries in the VM. vm_ip = tutil.vm_get_ip(vm_uuid) assert None != vm_ip tutil.scp(vm_ip, '../biotest', '/var/tmp/biotest') # Create a VDI on the specified SR. vdi_uuid = sm._createVDI(sr_uuid, size_gb * (2**30)) if verbose: print 'test VDI is ' + vdi_uuid # The original VHD file that backs the VDI on the NFS SR. vdi_file = '/var/run/sr-mount/' + sr_uuid + '/' + vdi_uuid + '.vhd' # The VHD file on the local SR that backs the remote VDI. cache_file = '/var/run/sr-mount/' + local_cache_sr + '/' + vdi_uuid + \ '.vhdcache' # Snapshot the VDI so it can be cached. vdi_snapshot_uuid = sm._snapshotVDI(vdi_uuid) # Create a VDB for it. vbd_uuid = sm._createVBD(vdi_uuid, vm_uuid) cache_on = False first_iter = True endt = time.time() + ttl # stats stats_cached = 0 stats_total = 0 stats_plug_unplug_loops = 0 stats_persistent = 0 while time.time() < endt: # In the beginning of each iteration the VDI is expected to be # detached. # # Enable/disable caching on the VDI. vm_shutdown = False if caching_prob >= random.random(): # Select persistence mode. 
persistent = (persistence_prob >= random.random()) if not cache_on: # enable only if not already enabled sm._vm_shutdown(vm_uuid) sm._vdi_enable_caching(vdi_uuid, persistent) cache_on = True vm_shutdown = True elif sm._vdi_cache_persistence(vdi_uuid) != persistent: sm._vm_shutdown(vm_uuid) sm._vdi_cache_persistence(vdi_uuid, persistent) vm_shutdown = True else: # disable caching if cache_on: # If not already disabled, we need to shut down the VM first. sm._vm_shutdown(vm_uuid) sm._vdi_disable_caching(vdi_uuid) cache_on = False vm_shutdown = True # Ensure that the cache file is gone. assert not os.path.exists(cache_file) if verbose: print 'cache ' + str(cache_on), if cache_on: print ', persistent ' + str(persistent) else: print # If the VM was restarted, it's IP address may have changed. if True == vm_shutdown: sm._vm_start(vm_uuid) # We must wait for the VM to boot. time.sleep(ttr) sm._unplugVBD(vbd_uuid) vm_ip = tutil.vm_get_ip(vm_uuid) # Check existing data. if first_iter: first_iter = False else: sm._plugVBD(vbd_uuid) dev = '/dev/' + sm._vbd_get_bdev(vbd_uuid) tutil.ssh(vm_ip, '/var/tmp/biotest -t ' + dev + ' -m ' + \ str(mbytes) + ' -v') sm._unplugVBD(vbd_uuid) endt2 = time.time() + worktime stats_prev_plug_unplug_loops = stats_plug_unplug_loops while time.time() < endt2: # Store the old size of the cache file so we can ensure data are # actually written to the cache (as a sanity check). The first # loop must be skipped because no data have been written yet. if cache_on: if stats_prev_plug_unplug_loops > stats_plug_unplug_loops: cache_file_prev_size = os.path.getsize(cache_file) else: cache_file_prev_size = 0 # FIXME check that VDI grows, or, if in non-persistent mode, that # it doesn't grow vdi_file_prev_size = os.path.getsize(vdi_file) sm._plugVBD(vbd_uuid) dev = '/dev/' + sm._vbd_get_bdev(vbd_uuid) # XXX timeout based on volume size. Preliminary tests show that the # average speed for volumes larger than 64 MB is 2 MB/s, let's # assume .5 MB/s. 
tutil.ssh(vm_ip, '/var/tmp/biotest -t ' + dev + ' -m ' + \ str(mbytes), timeout = 2 * mbytes) stats_plug_unplug_loops += 1 if cache_on: # Ensure that the cache file has grown. assert os.path.getsize(cache_file) >= cache_file_prev_size new_size = os.path.getsize(vdi_file) if persistent: # Persistent cache mode: ensure the original VDI has grown. if new_size < vdi_file_prev_size: print 'new VDI file size (' + str(new_size) \ + ') should be bigger than old one (' \ + str(vdi_file_prev_size) + ')' assert False else: # Reset cache mode: no writes should reach the VDI on the # NFS SR if not new_size == vdi_file_prev_size: print 'VDI on the shared SR has been modified whilst in reset mode, old size ' + str(vdi_file_prev_size) + ', new size ' + str(new_size) sm._unplugVBD(vbd_uuid) # Update stats. assert stats_plug_unplug_loops >= stats_prev_plug_unplug_loops if stats_plug_unplug_loops > stats_prev_plug_unplug_loops: stats_total += 1 if cache_on: stats_cached += 1 if persistent: stats_persistent += 1 sm._destroyVBD(vbd_uuid) sm._destroyVDI(vdi_snapshot_uuid) sm._destroyVDI(vdi_uuid) print 'total ' + str(stats_total) + ', cached ' + str(stats_cached) \ + ', plug/unplug loops ' + str(stats_plug_unplug_loops) + \ ', persistent ' + str(stats_persistent) return 0
class StorageManagerCLI(StorageManager): "Manage local XenServer storage through CLI" def __init__(self, logger): StorageManager.__init__(self, logger) def _getMasterUUID(self): cmd = "xe pool-list params=master --minimal" stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_SUB) master = stdout.strip() return master def _getDom0UUID(self): cmd = "xe vm-list dom-id=0 params=uuid --minimal" stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) if not stdout: raise SMException("No output from vm-list") dom0uuid = stdout.strip() if not tutil.validateUUID(dom0uuid): raise SMException("Got invalid UUID: %s" % dom0uuid) return dom0uuid def _getPoolUUID(self): cmd = "xe pool-list params=uuid --minimal" stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) poolUuid = stdout.strip() if not tutil.validateUUID(poolUuid): raise SMException("Got invalid UUID: %s" % poolUuid) return poolUuid def _getDefaultSR(self): cmd = "xe pool-param-get param-name=default-SR uuid=%s --minimal" % \ self._getPoolUUID() stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) uuid = stdout.strip() if tutil.validateUUID(uuid): return uuid return None def _getInfoSR(self, uuid): cmd = "xe sr-list uuid=%s params=all" % uuid stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD + 1) info = self._toDict(stdout) if not info["sm-config"]: info["sm-config"] = dict() return info def _refreshSR(self, sr): cmd = "xe sr-scan uuid=%s" % sr tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD + 1) def _getInfoPBD(self, uuid): cmd = "xe pbd-list uuid=%s params=all" % uuid stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD + 1) return self._toDict(stdout) def _getInfoVDI(self, uuid): cmd = "xe vdi-list uuid=%s params=all" % uuid stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD + 1) return self._toDict(stdout) def _createSR(self, type, size): cmd = "xe sr-create name-label='%s' physical-size=%d \ content-type=user device-config:device=%s type=%s" % \ (self.SR_LABEL, size, 
self.targetDevice, type) stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) if not stdout: raise SMException("No output from sr-create") srUuid = stdout.strip() if not tutil.validateUUID(srUuid): raise SMException("Got invalid UUID: %s" % srUuid) return srUuid def _getParam(self, obj, uuid, param): cmd = "xe %s-list uuid=%s params=%s" % (obj, uuid, param) return tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_SUB) def _setParam(self, obj, uuid, param, val, set): cmd = "xe %s-param-set uuid=%s %s=%s" % (obj, uuid, param, val) if not set: cmd = "xe %s-param-clear uuid=%s param-name=%s" % (obj, uuid, param) tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) def _probe(self, type, dev): cmd = "xe sr-probe type=%s device-config:device=%s" % (type, dev) stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) return stdout def _findSR(self, type): cmd = "xe sr-list type=%s --minimal" % type stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) stdout = stdout.strip() if not stdout: return [] srList = stdout.split(",") return srList def _findThisPBD(self, sr): cmd = "xe pbd-list sr-uuid=%s host-uuid=%s params=uuid --minimal" % \ (sr, self.getThisHost()) stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) return stdout.strip() def _plugSR(self, sr): cmd = "xe pbd-list sr-uuid=%s params=uuid --minimal" % sr stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) for uuid in stdout.split(","): cmd = "xe pbd-plug uuid=%s" % uuid tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) def _unplugSR(self, sr): cmd = "xe pbd-list sr-uuid=%s params=uuid --minimal" % sr stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) for uuid in stdout.split(","): cmd = "xe pbd-unplug uuid=%s" % uuid tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) def _getVDIs(self, sr): cmd = "xe vdi-list sr-uuid=%s params=uuid --minimal" % sr stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_SUB) stdout = stdout.strip() if not stdout: return [] vdiList = stdout.split(",") return 
vdiList def _getLeafVDIs(self, sr): vdiList = self.getVDIs(sr) cmd = "xe vdi-list sr-uuid=%s name-label='base copy' params=uuid --minimal" % sr stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_SUB) stdout = stdout.strip() if not stdout: baseList = [] baseList = stdout.split(",") leafList = [] for vdi in vdiList: if vdi not in baseList: leafList.append(vdi) return leafList def _createVDI(self, sr, size, name, raw): cmd = "xe vdi-create sr-uuid=%s name-label='%s' \ type=user virtual-size=%d" % (sr, name, size) if raw: cmd += " sm-config:type=raw" else: cmd += " sm-config:type=vhd" stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) if not stdout: raise SMException("No output from vdi-create") vdiUuid = stdout.strip() if not tutil.validateUUID(vdiUuid): raise SMException("Got invalid UUID: %s" % vdiUuid) return vdiUuid def _cloneVDI(self, vdi): cmd = "xe vdi-clone uuid=%s" % vdi for i in range(CMD_NUM_RETRIES): try: stdout = tutil.execCmd(cmd, 0, self.logger, LOG_LEVEL_CMD) break except tutil.CommandException, inst: if str(inst).find("VDI_IN_USE") != -1: self.logger.log("Command failed, retrying", LOG_LEVEL_CMD) time.sleep(CMD_RETRY_PERIOD) else: raise if not stdout: raise SMException("No output from vdi-snapshot") cloneUuid = stdout.strip() if not tutil.validateUUID(cloneUuid): raise SMException("Got invalid UUID: %s" % cloneUuid) return cloneUuid