def doWork(self): configHolder = ConfigHolder(self.options.__dict__, self.config or {}) configHolder.pdiskProtocol = "https" pdisk = VolumeManagerFactory.create(configHolder) for uuid in self.uuids: volumeId = pdisk.deleteVolume(uuid) print 'DELETED %s' % volumeId
def create_volume(self, size, name, location=None, snapshot=None):
    """
    Create a new 'private' storage volume of the given size; 'name'
    becomes the volume tag.  The 'snapshot' parameter is currently
    ignored.  The returned StorageVolume carries an extra dict whose
    'location' key records the location argument that was passed in.

    @inherits: L{NodeDriver.create_volume}
    """
    manager = VolumeManagerFactory.create(self._get_config_section(location))
    # Third argument is the visibility flag: False == private disk.
    new_uuid = manager.createVolume(size, name, False)
    return StorageVolume(new_uuid, name, long(size), self,
                         extra={'location': location})
def _changeOwnerOfSnapshotVolume(self):
    """Quarantine the root volume when its identifier marks it as a snapshot."""
    manager = VolumeManagerFactory.create(self.configHolder)
    # Root volume may not exist (e.g. when this is an image creation);
    # ownership is only actually changed for snapshot volumes.
    if not self.rootVolumeUuid:
        return
    identifier = manager.getValue('identifier', self.rootVolumeUuid)
    if re.match('.*snapshot.*', identifier):
        manager.quarantineVolume(self.rootVolumeUuid)
def doWork(self):
    """Apply the key/value options from the command line to one volume."""
    holder = ConfigHolder(self.options.__dict__, self.config or {})
    holder.pdiskProtocol = "https"
    manager = VolumeManagerFactory.create(holder)
    updates = self.extractVolumeOptionsAsDict()
    # Nothing to send when no volume options were given.
    if updates:
        manager.updateVolumeAsUser(updates, self.uuid)
def doWork(self): configHolder = ConfigHolder(self.options.__dict__, self.config or {}) configHolder.pdiskProtocol = "https" pdisk = VolumeManagerFactory.create(configHolder) for uuid in self.uuids: try: target = pdisk.hotDetach(self.options.instance, uuid) print 'DETACHED %s from VM %s on /dev/%s' % (uuid, self.options.instance, target) except Exception, e: printError('DISK %s: %s' % (uuid, e), exit=False)
def doWork(self): configHolder = ConfigHolder(self.options.__dict__, self.config or {}) configHolder.pdiskProtocol = "https" pdisk = VolumeManagerFactory.create(configHolder) for uuid in self.uuids: free, _ = pdisk.getVolumeUsers(uuid) if free < 1: printError('DISK %s: Disk not available\n' % uuid, exit=False) else: target = pdisk.hotAttach(self.node, self.options.instance, uuid) print 'ATTACHED %s in VM %s on /dev/%s' % (uuid, self.options.instance, target)
def _checkPersistentDiskAvailable(self):
    """Verify the persistent disk can host self.instanceNumber mounts.

    Side effect: caches the volume manager on self.pdisk.  Errors are
    printed and then re-raised so the caller can abort.
    """
    self.pdisk = VolumeManagerFactory.create(self.configHolder)
    try:
        # getVolumeUsers returns a pair; the first element is the number
        # of remaining mount slots.
        available, _ = self.pdisk.getVolumeUsers(self.persistentDiskUUID)
        if self.instanceNumber > available:
            printError("disk cannot be attached; it is already mounted (%s/%s)" % (available, self.instanceNumber))
    except AttributeError:
        # Presumably raised when the pdisk client/service is not
        # configured -- TODO confirm which attribute access triggers this.
        printError("Persistent disk service unavailable", exit=False)
        raise
    except Exception as e:
        printError(e, exit=False)
        raise
def __init__(self, imageFile, configHolder=None):
    """Prepare persistent-disk handling for the given image file.

    imageFile    -- path to the image; the manifest path is derived by
                    replacing '.img' with '.xml'.
    configHolder -- optional ConfigHolder; a fresh one is built when
                    omitted.  (The previous `configHolder=ConfigHolder()`
                    default was evaluated once at definition time, so one
                    mutable holder was shared -- and mutated via assign()
                    -- by every instance created without an explicit
                    holder.)
    """
    if configHolder is None:
        configHolder = ConfigHolder()
    self.imageMetadata = {}
    self.configHolder = configHolder
    configHolder.assign(self)
    self.imageFile = imageFile
    # Manifest lives next to the image, with .img swapped for .xml.
    self.manifestFile = self.imageFile.replace('.img', '.xml')
    self.imageUrl = ''
    self.pdisk = VolumeManagerFactory.create(self.configHolder)
def _vm_get_root_disk_size_from_pdisk(disk_source, config_holder):
    """Look up on the pdisk service the size of the root disk named by
    disk_source and return it as an int.

    Raises Exceptions.ExecutionException when no matching volume is
    found on the endpoint.
    """
    # Normalise the source to a purely colon-separated form; the
    # endpoint is then the host:port pair in fields 1-2.
    normalized = disk_source.replace('/', ':')
    endpoint = ':'.join(normalized.split(':')[1:3])
    config_holder.set('pdiskUsername', config_holder.username)
    config_holder.set('pdiskPassword', config_holder.password)
    config_holder.set('pdiskEndpoint', endpoint)
    manager = VolumeManagerFactory.create(config_holder)
    image_uuid = _disk_source_get_image_id(normalized)
    # Anchored regex so only the exact UUID matches.
    matches = manager.describeVolumes({'uuid': ['^%s$' % image_uuid]})
    if not matches:
        raise Exceptions.ExecutionException('Failed to describe volume in %s with UUID %s' % (endpoint, image_uuid))
    return int(matches[0]['size'])
def _create_volume(self, job):
    """Process a volume-creation job.

    Reads the volume document from Couchbase, creates the backing pdisk
    volume, then updates the document state and the job record.  Every
    failure path marks the job (and, where possible, the volume
    document) as failed and returns early.
    """
    job_id = self._job_id(job)
    vol_docid = str(job['targetResource'])
    self.logger.info('creating %s' % vol_docid)
    try:
        rv = self.cb.get(vol_docid)
        cas = rv.cas  # CAS token for optimistic concurrency on updates
        volume = rv.value
    except couchbase.exceptions.NotFoundError:
        msg = 'cannot retrieve %s' % vol_docid
        self._set_job_error(job_id, msg)
        return
    try:
        kbytes = volume['capacity']
    except KeyError:
        msg = 'volume is missing capacity value'
        self._set_job_error(job_id, msg)
        self.update_volume_state(vol_docid, volume, cas, 'ERROR', msg=msg)
        return
    size = Controller.kb_to_gb(kbytes)
    try:
        tag = str(volume['name'])
    except KeyError:
        # Tag is optional; create an untagged volume.
        tag = None
    config_holder = ConfigHolder(config=self.cfg)
    try:
        pdisk = VolumeManagerFactory.create(config_holder)
        # createVolume's third argument is a boolean visibility flag
        # (False == private), as documented and used by the other
        # callers in this code base; the previous truthy string
        # 'private' would have created a PUBLIC disk.
        sl_uuid = pdisk.createVolume(size, tag, False)
    except Exception as e:
        msg = 'error creating volume: %s' % str(e)
        self._set_job_error(job_id, msg)
        self.update_volume_state(vol_docid, volume, cas, 'ERROR', msg=str(e))
        return
    self.logger.info('created pdisk uuid %s' % str(sl_uuid))
    self.update_volume_state(vol_docid, volume, cas, 'AVAILABLE', sl_uuid=sl_uuid)
    try:
        Util.retry_update_job(self.cb, job_id, state='SUCCESS',
                              previous_state='RUNNING', progress=100,
                              msg='OK', executor=self.executor)
    except Exception as e:
        self.logger.error('cannot update %s: %s' % (job_id, str(e)))
def doWork(self): configHolder = ConfigHolder(self.options.__dict__, self.config or {}) configHolder.pdiskProtocol = "https" pdisk = VolumeManagerFactory.create(configHolder) if self.options.originUuid: volumeId = pdisk.createCowVolume(self.options.originUuid) elif self.options.rebaseUuid: volumeId = pdisk.rebaseVolume(self.options.rebaseUuid) else: volumeId = pdisk.createVolume(self.options.volumeSize, self.options.volumeTag, self.options.volumeVisibility) print 'DISK %s' % volumeId
def destroy_volume(self, volume):
    """
    Destroys the given volume.

    @inherits: L{NodeDriver.destroy_volume}
    """
    holder = self._get_config_section(self._volume_location(volume))
    manager = VolumeManagerFactory.create(holder)
    manager.deleteVolume(volume.id)
    return True
def detach_volume(self, volume):
    """Detach the volume from the node recorded in volume.extra['node'].

    Raises Exception when the volume carries no node information.
    """
    holder = self._get_config_section(self._volume_location(volume))
    manager = VolumeManagerFactory.create(holder)
    try:
        attached_node = volume.extra['node']
    except (AttributeError, KeyError):
        # Either extra is missing entirely or it has no 'node' entry.
        raise Exception('volume is not attached to a node')
    manager.hotDetach(attached_node.id, volume.id)
    del volume.extra['node']
    return True
def _delete_volume(self, job):
    """Process a volume-deletion job.

    Looks up the volume document in Couchbase, deletes the backing
    pdisk volume, removes the document, and records the job outcome.
    Every failure path sets the job into an error state and returns.
    """
    job_id = self._job_id(job)
    vol_docid = str(job['targetResource'])
    self.logger.info('deleting %s' % vol_docid)
    try:
        rv = self.cb.get(vol_docid)
        cas = rv.cas  # CAS token guards against concurrent modification
        volume = rv.value
    except couchbase.exceptions.NotFoundError:
        msg = 'cannot retrieve %s' % vol_docid
        self._set_job_error(job_id, msg)
        return
    try:
        # The storage-layer UUID is required to delete the real disk.
        sl_uuid = str(volume['properties']['sl_uuid'])
    except Exception:
        msg = 'volume is missing sl_uuid property'
        self._set_job_error(job_id, msg)
        return
    config_holder = ConfigHolder(config=self.cfg)
    try:
        pdisk = VolumeManagerFactory.create(config_holder)
        pdisk.deleteVolume(sl_uuid)
    except Exception as e:
        msg = 'error deleting sl_uuid %s: %s' % (sl_uuid, str(e))
        self._set_job_error(job_id, msg)
        return
    try:
        # Delete with CAS so a concurrently updated document is not lost.
        self.cb.delete(vol_docid, cas=cas)
    except couchbase.exceptions.KeyExistsError:
        msg = 'cannot delete %s' % vol_docid
        self._set_job_error(job_id, msg)
        return
    try:
        Util.retry_update_job(self.cb, job_id, state='SUCCESS',
                              previous_state='RUNNING', progress=100,
                              msg='OK', executor=self.executor)
    except Exception as e:
        self.logger.error('cannot update %s: %s' % (job_id, str(e)))
def attach_volume(self, node, volume, device=None):
    """Hot-attach the volume to the node and record the node on it.

    The 'device' parameter is accepted for interface compatibility but
    not used.  Raises Exception when the node lacks host information.
    """
    holder = self._get_config_section(self._volume_location(volume))
    manager = VolumeManagerFactory.create(holder)
    try:
        vm_host = node.host
    except AttributeError:
        raise Exception('node does not contain host information')
    manager.hotAttach(vm_host, node.id, volume.id)
    # Remember which node the volume is attached to; create the extra
    # dict when the volume does not have one yet.
    try:
        volume.extra['node'] = node
    except AttributeError:
        volume.extra = {'node': node}
    return True
def _detachAllVolumes(self):
    """Detach every attached persistent volume, collecting failures.

    Side effect: remembers the first volume URI's disk name in
    self.rootVolumeUuid so its ownership can be changed later.  If any
    detach fails, one Exception aggregating all error messages is
    raised after every volume has been attempted.
    """
    pdisk = VolumeManagerFactory.create(self.configHolder)
    msg = ''
    self.rootVolumeUuid = None
    for pdisk_uri in self.attachedVolumeURIs:
        pdisk_uri = pdisk_uri.strip()
        if pdisk_uri:
            # saves the root volume uuid so that the ownership can be changed later
            if not self.rootVolumeUuid:
                self.rootVolumeUuid = self._getDiskNameFromURI(pdisk_uri)
            try:
                self._detachSingleVolume(pdisk, pdisk_uri)
            except Exception as e:
                # Keep going; report all failures together at the end.
                msg += str(e) + "\n"
    if msg:
        raise Exception(msg)
def list_volumes(self, location=None):
    """
    Return all volumes in the given location -- the user's private
    disks as well as other users' public disks.

    This method is not a standard part of the Libcloud node driver
    interface.
    """
    manager = VolumeManagerFactory.create(self._get_config_section(location))
    # An empty filter dict means "describe everything".
    infos = manager.describeVolumes({})
    return [self._create_storage_volume(info, location) for info in infos]
def _poll_storage_for_new_image(self, pdisk_endpoint, diid, slConfigHolder):
    """Poll the pdisk endpoint for a volume tagged with this deployment.

    Returns the new image identifier, or '' when nothing shows up
    within the configured polling window.
    """
    # TODO: Introduce checking for the state of the VM. Bail out on Failed or Unknown.
    tag = "SlipStream-%s" % diid
    filters = {'tag': [tag, ]}
    slConfigHolder.set('pdiskEndpoint', pdisk_endpoint)
    pdisk = VolumeManagerFactory.create(slConfigHolder)
    print >> sys.stdout, "Searching on %s for disk with tag %s." % \
        (pdisk_endpoint, tag)
    sys.stdout.flush()
    new_image_id = ''
    # Polling window and interval come from configuration (seconds).
    poll_duration = self._get_poll_storage_for_image_id_timeout()
    time_stop = time.time() + poll_duration
    time_sleep = self._get_poll_storage_for_image_id_sleep()
    print >> sys.stdout, "Sleeping for %s min with %s min intervals." % \
        (poll_duration / 60, time_sleep / 60)
    while time.time() <= time_stop:
        volumes = pdisk.describeVolumes(filters)
        if len(volumes) > 0:
            # Found a candidate; take the first match and stop polling.
            try:
                new_image_id = volumes[0]['identifier']
            except Exception as ex:
                print "Exception occurred looking for volume: %s" % ex
            break
        time.sleep(time_sleep)
        print >> sys.stdout, "Time left for search %d min." % (
            (time_stop - time.time()) / 60)
        sys.stdout.flush()
    return new_image_id
def _pollStorageForNewImage(self, slConfigHolder):
    """Poll pdisk storage for an image produced by this deployment.

    Only polls when the SLIPSTREAM_MESSAGING_* environment variables
    select the 'pdisk' messaging type and a deployment id is set.
    Returns the discovered image identifier, or '' when none appears.
    """
    newImageId = ''
    msg_type = os.environ.get('SLIPSTREAM_MESSAGING_TYPE', None)
    msg_endpoint = os.environ.get('SLIPSTREAM_MESSAGING_ENDPOINT', None)
    if msg_type and msg_endpoint:
        if msg_type == 'pdisk':
            diid = os.environ.get('SLIPSTREAM_DIID', None)
            if diid:
                tag = "SlipStream-%s" % diid
                filters = {'tag': [tag, ]}
                slConfigHolder.set('pdiskEndpoint', msg_endpoint)
                pdisk = VolumeManagerFactory.create(slConfigHolder)
                print >> sys.stdout, "Searching on %s for disk with tag %s." % (msg_endpoint, tag)
                sys.stdout.flush()
                # hardcoded polling for 30' at 1' intervals
                for i in range(30):
                    print >> sys.stdout, "Search iteration %d" % i
                    sys.stdout.flush()
                    volumes = pdisk.describeVolumes(filters)
                    if len(volumes) > 0:
                        # Found a candidate; take the first match and stop.
                        try:
                            newImageId = volumes[0]['identifier']
                        except Exception as e:
                            print "Exception occurred looking for volume: %s" % e
                            pass
                        break
                    time.sleep(60)
    print "Returning new image ID value: %s" % newImageId
    return newImageId
def persistentDiskStorageDownloadTest(self):
    """Check that an image can be downloaded correctly.

    Creates a disk, writes a marker file from a VM, downloads and
    unpacks the volume locally, and verifies the marker file survived
    the round trip.  Cleanup is best-effort: failures to remove the
    temporary files are ignored, but only ordinary exceptions are
    swallowed (a bare 'except:' would also eat KeyboardInterrupt and
    SystemExit).
    """
    pdiskDevice = "/dev/hdc"  # !!!! Configured for the default image (ttylinux)
    pdiskMountPoint = "/mnt/pdisk-test"
    testFile = "%s/pdisk.txt"  # template; mount point filled in below
    testString = "pdiskTest"
    downloadedCompressedDisk = "/var/tmp/pdisk-img.gz"
    localMountPoint = "/mnt/pdisk-check"
    localTestFile = "/tmp/pdiskGzip.tmp"
    configHolder = Testor.configHolder.copy()
    configHolder.pdiskUsername = Testor.configHolder.testUsername
    configHolder.pdiskPassword = Testor.configHolder.testPassword
    pdisk = VolumeManagerFactory.create(configHolder)
    Util.printAction("Creating a new persistent disk")
    diskUUID = pdisk.createVolume(1, "test %s" % datetime.datetime.today(), False)
    Util.printAction("Checking persistent disk exists")
    if not pdisk.volumeExists(diskUUID):
        self.fail("An error occurred while creating a persistent disk")
    Util.printAction("Starting machine with persistent disk")
    runner = self._startVmWithPDiskAndWaitUntilUp(diskUUID)
    self._formatDisk(runner, pdiskDevice)
    self._mountDisk(runner, pdiskDevice, pdiskMountPoint)
    self._writeToFile(runner, testFile % pdiskMountPoint, testString)
    self._umountPDiskAndStopVm(runner, pdiskDevice)
    try:
        Util.printAction("Downloading volume...")
        # compressed disk comes in HTTP response - don't print it from HTTP client!
        verb_save = pdisk.client.verboseLevel
        pdisk.client.verboseLevel = 0
        try:
            pdisk.downloadVolume(diskUUID, downloadedCompressedDisk)
        finally:
            # Restore verbosity even when the download fails.
            pdisk.client.verboseLevel = verb_save
        volume = self._gunzip(downloadedCompressedDisk)
    finally:
        try:
            remove(downloadedCompressedDisk)
        except Exception:
            pass
    try:
        if not self._localMount(volume, localMountPoint, ["loop"]):
            self.fail("Error mounting downloaded image, corrupted?")
        filePutContent(localTestFile, testString)
        fileEquals = self._compareLocalFiles(localTestFile, testFile % localMountPoint)
        if not fileEquals:
            self.fail("Downloaded volume is corrupted")
        self._localUmount(localMountPoint)
    finally:
        Util.printAction("Post test clean-up...")
        try:
            remove(volume)
        except Exception:
            pass
        try:
            rmdir(localMountPoint)
        except Exception:
            pass
def persistentDiskStorageHotplugTest(self):
    """Ensure that a disk can be hot-plugged to a VM and hot-unplugged.

    Verifies the available-user count decreases on attach and returns
    to its original value after detach and after VM stop, and that the
    disk can be removed at the end.
    """
    pdiskDevice = "/dev/%s"
    pdiskMountPoint = "/mnt/pdisk-test"
    testFile = "%s/pdisk.txt" % pdiskMountPoint
    testFileCmp = "/tmp/pdisk.cmp"
    testString = "pdiskTest"
    configHolder = Testor.configHolder.copy()
    configHolder.pdiskUsername = Testor.configHolder.testUsername
    configHolder.pdiskPassword = Testor.configHolder.testPassword
    pdisk = VolumeManagerFactory.create(configHolder)
    runner = self._startVmWithPDiskAndWaitUntilUp(image=self.ubuntuImg)
    Util.printAction("Creating a new persistent disk")
    diskUUID = pdisk.createVolume(1, "test %s" % datetime.datetime.today(), False)
    Util.printAction("Checking persistent disk exists")
    if not pdisk.volumeExists(diskUUID):
        self.fail("An error occurred while creating a persistent disk")
    # Hot-plug support in the guest requires the acpiphp module.
    self._modeprobe(runner, "acpiphp")
    vmId = self.vmIds[0]
    node = runner.cloud.getVmNode(vmId)
    printStep("Attaching pdisk to VM")
    availableUserBeforeAttach, _ = pdisk.getVolumeUsers(diskUUID)
    device = pdisk.hotAttach(node, vmId, diskUUID)
    availableUserAfterAttach, _ = pdisk.getVolumeUsers(diskUUID)
    if availableUserAfterAttach != (availableUserBeforeAttach - 1):
        self.fail(
            "Available users on persistent disk have to decrease by "
            "one; before=%s, after=%s" % (availableUserBeforeAttach,
                                          availableUserAfterAttach)
        )
    self._formatDisk(runner, pdiskDevice % device)
    self._mountDisk(runner, pdiskDevice % device, pdiskMountPoint)
    self._writeToFile(runner, testFile, testString)
    self._umountDisk(runner, pdiskDevice % device)
    printStep("Detaching pdisk of VM")
    pdisk.hotDetach(node, vmId, diskUUID)
    availableUserAfterDetach, _ = pdisk.getVolumeUsers(diskUUID)
    if availableUserAfterDetach != availableUserBeforeAttach:
        self.fail(
            "Available users on persistent disk have to be the "
            "same as when VM has started; before=%s, after=%s"
            % (availableUserBeforeAttach, availableUserAfterDetach)
        )
    printStep("Re-attaching pdisk to VM")
    device = pdisk.hotAttach(node, vmId, diskUUID)
    self._mountDisk(runner, pdiskDevice % device, pdiskMountPoint)
    self._writeToFile(runner, testFileCmp, testString)
    self._compareFiles(runner, testFile, testFileCmp)
    self._umountPDiskAndStopVm(runner, pdiskDevice % device)
    availableUserAfterStop, _ = pdisk.getVolumeUsers(diskUUID)
    if availableUserAfterStop != availableUserBeforeAttach:
        self.fail(
            "Available users on persistent disk have to be the "
            "same as when VM has started; before=%s, after=%s"
            % (availableUserBeforeAttach, availableUserAfterStop)
        )
    Util.printAction("Removing persistent disk...")
    pdisk.deleteVolume(diskUUID)
    try:
        if pdisk.volumeExists(diskUUID):
            self.fail("The persistent disk %s is still present" % diskUUID)
    # 'as' form replaces the legacy comma syntax (Python 3 compatible).
    except ClientException as ex:
        # A 404 from the service is the expected "volume gone" answer.
        if not re.match("404", ex.status):
            self.fail("The persistent disk %s is still present" % diskUUID)
def persistentDiskStorageTest(self):
    """Ensure that a disk can be created, written, stored and removed.

    Checks the available-user count decreases while a VM uses the disk
    and returns to its original value after the VM stops, and that the
    written data survives across VM restarts.
    """
    pdiskDevice = "/dev/hdc"  # !!!! Configured for the default image (ttylinux)
    pdiskMountPoint = "/mnt/pdisk-test"
    testFile = "%s/pdisk.txt" % pdiskMountPoint
    testFileCmp = "/tmp/pdisk.cmp"
    testString = "pdiskTest"
    configHolder = Testor.configHolder.copy()
    configHolder.pdiskUsername = Testor.configHolder.testUsername
    configHolder.pdiskPassword = Testor.configHolder.testPassword
    pdisk = VolumeManagerFactory.create(configHolder)
    Util.printAction("Creating a new persistent disk")
    diskUUID = pdisk.createVolume(1, "test %s" % datetime.datetime.today(), False)
    Util.printAction("Checking persistent disk exists")
    if not pdisk.volumeExists(diskUUID):
        self.fail("An error occurred while creating a persistent disk")
    Util.printAction("Getting number of available users (before)")
    availableUserBeforeStart, _ = pdisk.getVolumeUsers(diskUUID)
    Util.printAction("Starting machine with persistent disk")
    runner = self._startVmWithPDiskAndWaitUntilUp(diskUUID)
    Util.printAction("Getting number of available users (after)")
    availableUserAfterStart, _ = pdisk.getVolumeUsers(diskUUID)
    if availableUserAfterStart != (availableUserBeforeStart - 1):
        self.fail(
            "Available users on persistent disk have to decrease by "
            "one (%s, %s)" % (availableUserBeforeStart,
                              availableUserAfterStart)
        )
    self._formatDisk(runner, pdiskDevice)
    self._mountDisk(runner, pdiskDevice, pdiskMountPoint)
    self._writeToFile(runner, testFile, testString)
    self._umountPDiskAndStopVm(runner, pdiskDevice)
    # Allow a few seconds for the disk to be dismounted.
    time.sleep(10)
    availableUserAfterStop, _ = pdisk.getVolumeUsers(diskUUID)
    if availableUserAfterStop != availableUserBeforeStart:
        self.fail("Available users on persistent disk have to be the same "
                  "as when VM has started")
    runner = self._startVmWithPDiskAndWaitUntilUp(diskUUID)
    self._mountDisk(runner, pdiskDevice, pdiskMountPoint)
    self._writeToFile(runner, testFileCmp, testString)
    self._compareFiles(runner, testFile, testFileCmp)
    self._umountPDiskAndStopVm(runner, pdiskDevice)
    availableUserAfterStop, _ = pdisk.getVolumeUsers(diskUUID)
    if availableUserAfterStop != availableUserBeforeStart:
        self.fail("Available users on persistent disk have to be the same "
                  "as when VM has started")
    Util.printAction("Removing persistent disk...")
    pdisk.deleteVolume(diskUUID)
    try:
        if pdisk.volumeExists(diskUUID):
            self.fail("The persistent disk %s is still present" % diskUUID)
    # 'as' form replaces the legacy comma syntax (Python 3 compatible).
    except ClientException as ex:
        # A 404 from the service is the expected "volume gone" answer.
        if not re.match("404", ex.status):
            self.fail("The persistent disk %s is still present" % diskUUID)
def doWork(self):
    """Describe the volumes matching the command-line filters and print them."""
    holder = ConfigHolder(self.options.__dict__, self.config or {})
    manager = VolumeManagerFactory.create(holder)
    matching = manager.describeVolumes(self._formatFilter())
    self._printVolumes(matching)
def _initPdiskClient(self):
    """Derive the pdisk endpoint from configuration and build the client."""
    endpoint = self._createPdiskEndpoint()
    self.pdiskEndpoint = endpoint
    self.pdiskLVMDevice = self.configHolder.persistentDiskLvmDevice
    # The volume manager reads the endpoint back from the config holder.
    self.configHolder.set('pdiskEndpoint', endpoint)
    self.pdisk = VolumeManagerFactory.create(self.configHolder)