def testCleanQuarantine(self):
    now = datetime.now()
    past = str(now - timedelta(days=1))
    way_past = str(now - timedelta(days=3))
    mock = Mock(return_value=[{"quarantine": past, "uuid": "past"},
                              {"quarantine": way_past, "uuid": "way_past"},
                              {"quarantine": str(now), "uuid": "now"}])
    config = ConfigHolder()
    config.set("endpoint", "something")
    pd = PersistentDisk(config)
    pd.quarantinePeriod = "2d"
    pd.describeVolumes = mock
    pd.deleteVolume = Mock()
    pd._setPDiskUserCredentials = Mock()

    pd.cleanQuarantine()

    self.assertEqual(("way_past",), pd.deleteVolume.call_args[0])
    self.assertEqual(1, pd.deleteVolume.call_count)
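# Illustrative sketch only, not taken from the source: one shape of
# PersistentDisk.cleanQuarantine() that the test above would accept. The
# 'quarantine'/'uuid' record keys come from the test fixture; the timestamp
# format (str(datetime)) and the free function form are assumptions.
from datetime import datetime


def clean_quarantine(pdisk):
    pdisk._setPDiskUserCredentials()
    period_minutes = pdisk._getQuarantinePeriod()  # e.g. "2d" -> 2880 minutes
    now = datetime.now()
    for volume in pdisk.describeVolumes():
        quarantined = datetime.strptime(volume['quarantine'],
                                        '%Y-%m-%d %H:%M:%S.%f')
        age_minutes = (now - quarantined).total_seconds() / 60
        if age_minutes > period_minutes:
            pdisk.deleteVolume(volume['uuid'])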
def doWork(self):
    configHolder = ConfigHolder(self.options.__dict__, self.config or {})
    configHolder.set('imageMetadata',
                     PDiskVolume.extractVolumeOptionsAsDict(self.options))
    uploader = Uploader(self.image, configHolder)
    try:
        uploader.start()
    except InputException, e:
        print e
        sys.exit(1)
def testSetImageIdOnMessage(self):
    configHolder = ConfigHolder()
    configHolder.set('msg_type', 'amazonsqs')

    self.failUnlessRaises(InputException, ImageIdPublisher,
                          *('', '', configHolder))

    publisher = ImageIdPublisher('', 'Oj3KIhOEZ4LPhJK7LdFdfluTw17', configHolder)
    assert '{"imageid": "Oj3KIhOEZ4LPhJK7LdFdfluTw17"}' == publisher.message

    publisher = ImageIdPublisher('{"foo": "bar"}', 'Oj3KIhOEZ4LPhJK7LdFdfluTw17',
                                 configHolder)
    assert '{"foo": "bar", "imageid": "Oj3KIhOEZ4LPhJK7LdFdfluTw17"}' == publisher.message
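# Illustrative sketch, not the project's implementation: the behaviour the test
# above implies -- the image id is merged into whatever JSON payload is passed
# in, and an empty message becomes a fresh JSON object. The json-based merge and
# the function name are assumptions; the real class apparently also preserves
# key order, which plain json.dumps does not guarantee on older Pythons.
import json


def set_image_id_on_message(message, image_id):
    payload = json.loads(message) if message else {}
    payload['imageid'] = image_id
    return json.dumps(payload)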
def _registerInvalidImageInMarketplace(self):
    manifest_file = Util.get_resources_file(["manifest-invalid-sha1.xml"])
    manifestInfo = ManifestInfo()
    manifestInfo.parseManifestFromFile(manifest_file)
    image_id = manifestInfo.identifier

    configHolder = ConfigHolder()
    configHolder.set("marketplaceEndpoint", self.marketplaceEndpoint)
    uploader = marketplaceUploader(configHolder)
    uploader.upload(manifest_file)

    return image_id
class UploaderTest(unittest.TestCase):

    def setUp(self):
        self._uploadVolume = Mock(return_value='https://example.com/pdisk/uuid')
        self._updateVolumeAsUser = Mock()
        self.ch = ConfigHolder()
        self.ch.set('pdiskEndpoint', 'example.com')

    def tearDown(self):
        pass

    def test_uploadImageNoVolumeUpdate(self):
        uploader = Uploader('image.img', self.ch)
        uploader.pdisk.uploadVolume = self._uploadVolume
        uploader.pdisk.updateVolumeAsUser = self._updateVolumeAsUser

        uploader._uploadImage()

        assert uploader.pdisk.uploadVolume.called == True
        assert uploader.pdisk.uploadVolume.call_count == 1
        assert uploader.pdisk.uploadVolume.call_args == (('image.img',), {})
        assert uploader.pdisk.uploadVolume.return_value == 'https://example.com/pdisk/uuid'
        assert uploader.pdisk.updateVolumeAsUser.called == False

    def test_uploadImageVolumeUpdate(self):
        self.ch.set('imageMetadata', {'foo': 'bar'})
        uploader = Uploader('image.img', self.ch)
        uploader.pdisk.uploadVolume = self._uploadVolume
        uploader.pdisk.updateVolumeAsUser = self._updateVolumeAsUser

        uploader._uploadImage()

        assert uploader.pdisk.uploadVolume.called == True
        assert uploader.pdisk.uploadVolume.call_count == 1
        assert uploader.pdisk.uploadVolume.call_args == (('image.img',), {})
        assert uploader.pdisk.uploadVolume.return_value == 'https://example.com/pdisk/uuid'
        assert uploader.pdisk.updateVolumeAsUser.called == True
        assert uploader.pdisk.updateVolumeAsUser.call_count == 1
        assert uploader.pdisk.updateVolumeAsUser.call_args == (({'foo': 'bar'}, 'uuid'), {})
def testParseQuarantinePeriod(self):
    config = ConfigHolder()
    config.set("endpoint", "something")
    pd = PersistentDisk(config)

    pd.quarantinePeriod = None
    self.assertRaises(ValidationException, pd._getQuarantinePeriod)

    pd.quarantinePeriod = "15x"
    self.assertRaises(ValidationException, pd._getQuarantinePeriod)

    pd.quarantinePeriod = "xym"
    self.assertRaises(ValidationException, pd._getQuarantinePeriod)

    pd.quarantinePeriod = "15"
    self.assertEqual(pd._getQuarantinePeriod(), 15)

    pd.quarantinePeriod = "15m"
    self.assertEqual(pd._getQuarantinePeriod(), 15)

    pd.quarantinePeriod = "15h"
    self.assertEqual(pd._getQuarantinePeriod(), 15 * 60)

    pd.quarantinePeriod = "15d"
    self.assertEqual(pd._getQuarantinePeriod(), 15 * 60 * 24)
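# Illustrative sketch, not taken from the source: a parser consistent with the
# test above, returning the quarantine period in minutes. Plain numbers and the
# 'm' suffix mean minutes, 'h' hours, 'd' days; anything else raises
# ValidationException (the project's exception type, assumed to be in scope).
import re

_UNIT_TO_MINUTES = {'': 1, 'm': 1, 'h': 60, 'd': 60 * 24}


def parse_quarantine_period(value):
    match = re.match(r'^(\d+)([mhd]?)$', value or '')
    if not match:
        raise ValidationException('Invalid quarantine period: %s' % value)
    amount, unit = match.groups()
    return int(amount) * _UNIT_TO_MINUTES[unit]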
class MsgClientTest(unittest.TestCase):

    def setUp(self):
        self.ch = ConfigHolder()
        self.temp_dir = tempfile.mkdtemp()
        self.ch.set('msg_queue', self.temp_dir)
        self.ch.set('msg_endpoint', 'foo:1234')

    def tearDown(self):
        shutil.rmtree(self.temp_dir, ignore_errors=True)

    def testGetMsgClient(self):
        for msg_type in MSG_CLIENTS.keys():
            self.ch.set('msg_type', msg_type)
            getMsgClient(self.ch)

    def testSendImplemented(self):
        for msg_type in MSG_CLIENTS.keys():
            self.ch.set('msg_type', msg_type)
            client = getMsgClient(self.ch)
            try:
                client.send("message")
            except NotImplementedError:
                self.fail("send() should be implemented on '%s'." % msg_type)
            except Exception:
                pass
def test_getNetworkIdsFromNetworkNames(self):
    ch = ConfigHolder()
    ch.set('frontendSystem', 'fedora')
    ch.set('oneHome', '~')
    ch.set('oneUsername', 'foo')
    ch.set('onePassword', 'bar')
    oi = OpenNebulaFrontend(ch)

    def _getVnetInfoXml(vnet_name):
        if vnet_name == 'public':
            xml = OneInstallatorTest.VNET_INFO_XML % ('0', vnet_name)
        elif vnet_name == 'local':
            xml = OneInstallatorTest.VNET_INFO_XML % ('1', vnet_name)
        elif vnet_name == 'private':
            xml = OneInstallatorTest.VNET_INFO_XML % ('2', vnet_name)
        return xml

    oi._getVnetInfoXml = _getVnetInfoXml
    ids = oi._getVnetIdsFromVnetNames()
    assert [0, 1, 2] == ids
def test_extraDisksOnStratusLabRunner(self):
    stratuslabClient = StratuslabClientCloud(
        SlipstreamConfigHolder(context={'foo': 'bar'}, config={'foo': 'bar'}))

    slch = StratusLabConfigHolder()
    slch.set('username', 'foo')
    slch.set('password', 'bar')
    slch.set('endpoint', 'example.com')
    slch.set('verboseLevel', 0)

    node = {}
    node['extra_disks'] = {'extra.disk.volatile': '123',  # GB
                           'extra_disk_persistent': '1-2-3',
                           'extra_disk_readonly': 'ABC'}
    stratuslabClient._setExtraDisksOnConfigHolder(slch, node)

    Runner._checkPersistentDiskAvailable = Mock()
    runner = stratuslabClient._getStratusLabRunner('abc', slch)

    assert runner.extraDiskSize == int('123') * 1024  # MB
    assert runner.persistentDiskUUID == '1-2-3'
    assert runner.readonlyDiskId == 'ABC'
def test_extraDisksOnStratusLabRunner(self):
    stratuslabClient = StratusLabClientCloud(
        SlipstreamConfigHolder(context={'foo': 'bar'}, config={'foo': 'bar'}))

    slch = StratusLabConfigHolder()
    slch.set('username', 'foo')
    slch.set('password', 'bar')
    slch.set('endpoint', 'example.com')
    slch.set('verboseLevel', 0)

    node_instance = NodeInstance({
        'cloudservice': 'stratuslab',
        'extra.disk.volatile': '123',
        'stratuslab.extra_disk_persistent': '1-2-3',
        'stratuslab.extra_disk_readonly': 'ABC'
    })
    stratuslabClient._set_extra_disks_on_config_holder(slch, node_instance)

    Runner._setPersistentDiskOptional = Mock()
    runner = stratuslabClient._get_stratuslab_runner('abc', slch)

    assert runner.extraDiskSize == int('123') * 1024  # MB
    assert runner.persistentDiskUUID == '1-2-3'
    assert runner.readonlyDiskId == 'ABC'
class StratusLabClientCloud(BaseCloudConnector):
    RUNINSTANCE_RETRY_TIMEOUT = 3
    POLL_STORAGE_FOR_IMAGE_ID_TIMEOUT_MIN = 30
    POLL_STORAGE_FOR_IMAGE_ID_SLEEP_MIN = 1

    cloudName = 'stratuslab'

    @staticmethod
    def _wait_vm_in_state(states, runner, vm_id, counts=3, sleep=2, throw=False):
        counter = 1
        while counter <= counts:
            state = runner.getVmState(vm_id)
            if state in states:
                return state
            time.sleep(sleep)
            counter += 1
        if throw:
            raise Exception('Timed out while waiting for states: %s' % states)

    def __init__(self, configHolder):
        self.creator = None

        super(StratusLabClientCloud, self).__init__(configHolder)

        self.slConfigHolder = StratuslabConfigHolder(configHolder.options,
                                                     configHolder.config)

        self._set_listener(CreatorBaseListener(verbose=(self.verboseLevel > 1)))

        self._set_capabilities(contextualization=True,
                               direct_ip_assignment=True,
                               orchestrator_can_kill_itself_or_its_vapp=True)

        if self.verboseLevel > 2:
            LogUtil.set_logger_level(level=logging.DEBUG)

        # Temporary workaround: Try to increase to the maximum the limit of the
        # number of open file descriptors. This is a workaround to a bug where
        # some connections to the StratusLab frontend stay in CLOSE_WAIT. The
        # standard limit is hit when the Run contains a huge amount of VMs (> 1000).
        try:
            import resource
            l = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(resource.RLIMIT_NOFILE, (l[1], l[1]))
        except:
            pass

    def _start_image_for_build(self, user_info, node_instance):
        self._prepare_machine_for_build_image(user_info)

        manifest_downloader = ManifestDownloader(self.slConfigHolder)

        image_id = node_instance.get_image_id()
        node_instance.set_image_attributes(
            {'imageVersion': manifest_downloader.getImageVersion(imageId=image_id)})

        self._update_stratuslab_config_holder_for_build_image(user_info,
                                                              node_instance)

        self.creator = Creator(image_id, self.slConfigHolder)
        self.creator.setListener(self._get_listener())

        createImageTemplateDict = self.creator._getCreateImageTemplateDict()

        def our_create_template_dict():
            createImageTemplateDict.update({})
            return createImageTemplateDict

        self.creator._getCreateImageTemplateDict = our_create_template_dict

        self.creator.createStep1()

        vm = self.creator.runner
        return vm

    def list_instances(self):
        self.slConfigHolder.set('ipToHostname', False)
        vms = Monitor(self.slConfigHolder).listVms()
        populate_vms_with_disk_sizes(vms, self.slConfigHolder.deepcopy())
        return vms

    def _vm_get_ip_from_list_instances(self, vm_instance):
        return vm_instance.template_nic_ip

    def _vm_get_cpu(self, vm_instance):
        return vm_instance.template_vcpu

    def _vm_get_ram(self, vm_instance):
        return vm_instance.template_memory

    def _vm_get_root_disk(self, vm_instance):
        try:
            return vm_instance.template_disk_0_size
        except AttributeError:
            try:
                return get_root_disk_size_from_disk_source(
                    vm_instance.template_disk_source,
                    self.slConfigHolder.deepcopy())
            except UnknownRootDiskSizeSourceError:
                return super(BaseCloudConnector, self)._vm_get_root_disk()

    @override
    def _build_image(self, user_info, node_instance):
        machine_name = node_instance.get_name()

        vm = self._get_vm(machine_name)
        vm_ip = self._vm_get_ip(vm)

        self._build_image_increment(user_info, node_instance, vm_ip)

        self.creator.createStep2()

        image_id = self._search_storage_for_new_image(self.slConfigHolder)
        if not image_id:
            util.printDetail('WARNING: Failed to get image ID from StratusLab storage!',
                             verboseThreshold=0)
        else:
            util.printDetail('New built image ID %s' % image_id, verboseThreshold=0)

        return image_id

    def _search_storage_for_new_image(self, slConfigHolder):
        warn_msg = "WARNING: Unable to search for new image ID. %s env.var is not set."

        pdisk_endpoint = os.environ.get('SLIPSTREAM_PDISK_ENDPOINT', None)
        if not pdisk_endpoint:
            print >> sys.stdout, warn_msg % 'SLIPSTREAM_PDISK_ENDPOINT'
            sys.stdout.flush()
            return ''

        diid = os.environ.get('SLIPSTREAM_DIID', None)
        if not diid:
            print >> sys.stdout, warn_msg % 'SLIPSTREAM_DIID'
            sys.stdout.flush()
            return ''

        return self._poll_storage_for_new_image(pdisk_endpoint, diid, slConfigHolder)

    def _poll_storage_for_new_image(self, pdisk_endpoint, diid, slConfigHolder):
        # TODO: Introduce checking for the state of the VM. Bail out on Failed or Unknown.
        tag = "SlipStream-%s" % diid
        filters = {'tag': [tag, ]}

        slConfigHolder.set('pdiskEndpoint', pdisk_endpoint)
        pdisk = VolumeManagerFactory.create(slConfigHolder)

        print >> sys.stdout, "Searching on %s for disk with tag %s." % \
            (pdisk_endpoint, tag)
        sys.stdout.flush()

        new_image_id = ''
        poll_duration = self._get_poll_storage_for_image_id_timeout()
        time_stop = time.time() + poll_duration
        time_sleep = self._get_poll_storage_for_image_id_sleep()

        print >> sys.stdout, "Sleeping for %s min with %s min intervals." % \
            (poll_duration / 60, time_sleep / 60)

        while time.time() <= time_stop:
            volumes = pdisk.describeVolumes(filters)
            if len(volumes) > 0:
                try:
                    new_image_id = volumes[0]['identifier']
                except Exception as ex:
                    print "Exception occurred looking for volume: %s" % ex
                break
            time.sleep(time_sleep)
            print >> sys.stdout, "Time left for search %d min." % \
                ((time_stop - time.time()) / 60)
            sys.stdout.flush()

        return new_image_id

    def _get_poll_storage_for_image_id_timeout(self):
        "Returns the timeout in seconds."
        return self.POLL_STORAGE_FOR_IMAGE_ID_TIMEOUT_MIN * 60

    def _get_poll_storage_for_image_id_sleep(self):
        "Returns the sleep time in seconds."
        return self.POLL_STORAGE_FOR_IMAGE_ID_SLEEP_MIN * 60

    @staticmethod
    def _get_create_image_messaging_message(image_resource_uri):
        return base64.b64encode('{"uri":"%s", "imageid":""}' % image_resource_uri)

    @override
    def _initialization(self, user_info, **kwargs):
        self.slConfigHolder.options.update(Runner.defaultRunOptions())
        self._set_user_info_on_stratuslab_config_holder(
            user_info, run_instance=kwargs.get('run_instance', True))

    @override
    def _start_image(self, user_info, node_instance, vm_name):
        if self.is_build_image():
            return self._start_image_for_build(user_info, node_instance)
        else:
            return self._start_image_for_deployment(node_instance, vm_name)

    def _start_image_for_deployment(self, node_instance, vm_name):
        slConfigHolder = self.slConfigHolder.deepcopy()

        self._set_instance_params_on_config_holder(slConfigHolder, node_instance)

        image_id = node_instance.get_image_id()

        self._set_extra_context_data_on_config_holder(slConfigHolder, node_instance)
        self._set_vm_name_on_config_holder(slConfigHolder, vm_name)

        runner = self._run_instance(image_id, slConfigHolder)
        return runner

    @override
    def _vm_get_ip(self, vm):
        if isinstance(vm, CloudInfo):
            return getattr(vm, Monitor.TEMPLATE_NIC_IP)
        else:  # Runner
            return vm.instancesDetail[0]['ip']

    @override
    def _vm_get_id(self, vm):
        if isinstance(vm, CloudInfo):
            return vm.id
        else:  # Runner
            return vm.instancesDetail[0]['id']

    @override
    def _vm_get_state(self, vm):
        if isinstance(vm, CloudInfo):
            return vm.state_summary
        else:  # Runner
            return vm.instancesDetail[0]['state']

    def _set_instance_params_on_config_holder(self, slConfigHolder, node_instance):
        self._set_instance_size_on_config_holder(slConfigHolder, node_instance)
        self._set_extra_disks_on_config_holder(slConfigHolder, node_instance)
        self._set_network_type_on_config_holder(slConfigHolder, node_instance)

    def _set_instance_size_on_config_holder(self, slConfigHolder, node_instance):
        self._set_instance_type_on_configholder(slConfigHolder, node_instance)
        self._set_cpu_ram_on_config_holder(slConfigHolder, node_instance)

    def _set_instance_type_on_configholder(self, slConfigHolder, node_instance):
        instance_type = node_instance.get_instance_type()
        if instance_type:
            slConfigHolder.instanceType = instance_type

    def _set_cpu_ram_on_config_holder(self, slConfigHolder, node_instance):
        slConfigHolder.vmCpu = node_instance.get_cpu() or None
        vm_ram_gb = node_instance.get_ram() or None
        if vm_ram_gb:
            try:
                # StratusLab needs value in MB
                slConfigHolder.vmRam = str(int(vm_ram_gb.strip()) * 1024)
            except:
                pass

    def _set_extra_disks_on_config_holder(self, slConfigHolder, node_instance):
        # 'extra_disk_volatile' is given in GB - 'extraDiskSize' needs to be in MB
        slConfigHolder.extraDiskSize = int(
            node_instance.get_volatile_extra_disk_size() or 0) * 1024
        slConfigHolder.persistentDiskUUID = node_instance.get_cloud_parameter(
            'extra_disk_persistent', '')
        slConfigHolder.readonlyDiskId = node_instance.get_cloud_parameter(
            'extra_disk_readonly', '')

    def _set_extra_context_data_on_config_holder(self, slConfigHolder, node_instance):
        node_instance_name = node_instance.get_name()

        regex = 'SLIPSTREAM_'
        if self.is_start_orchestrator():
            regex += '|CLOUDCONNECTOR_'
        env_matcher = re.compile(regex)

        slConfigHolder.extraContextData = '#'.join(
            ['%s=%s' % (k, v) for (k, v) in os.environ.items()
             if env_matcher.match(k)])
        slConfigHolder.extraContextData += '#%s=%s' % (util.ENV_NODE_INSTANCE_NAME,
                                                       node_instance_name)
        slConfigHolder.extraContextData += \
            '#SCRIPT_EXEC=%s' % self._build_slipstream_bootstrap_command(node_instance)

    def _set_vm_name_on_config_holder(self, slConfigHolder, vm_name):
        slConfigHolder.vmName = vm_name

    def _run_instance(self, image_id, slConfigHolder, max_attempts=3):
        if max_attempts <= 0:
            max_attempts = 1
        attempt = 1
        while True:
            try:
                runner = self._do_run_instance(image_id, slConfigHolder)
            except socket.error, ex:
                if attempt >= max_attempts:
                    import traceback
                    cause = ''
                    cause_lines = traceback.format_exception(*sys.exc_info())
                    for line in cause_lines:
                        cause += line
                    raise Exceptions.ExecutionException(
                        "Failed to launch instance after %i attempts: %s \nCaused by :\n%s"
                        % (attempt, str(ex), cause))
                time.sleep(self.RUNINSTANCE_RETRY_TIMEOUT)
                attempt += 1
            else:
                return runner
def _get_config_holder():
    config = PDiskHelper._get_config_as_dict()
    ch = ConfigHolder()
    ch.set('pdiskEndpoint', config['persistentDiskIp'])
    ch.set('persistentDiskCloudServiceUser',
           config['persistentDiskCloudServiceUser'])
    return ch
class TMCloneCache(object):
    """Clone or retrieve from cache disk image"""

    # Debug option
    PRINT_TRACE_ON_ERROR = True
    DEFAULT_VERBOSE_LEVEL = 0

    _ARGS_LEN = 3

    # Position of the provided args
    _ARG_SRC_POS = 1
    _ARG_DST_POS = 2

    _PDISK_PORT = 8445

    _CHECKSUM = 'sha1'

    _ACCEPTED_EXTRA_DISK_TYPE = ['DATA_IMAGE_RAW_READONLY',
                                 'DATA_IMAGE_RAW_READ_WRITE']
    _ACCEPTED_ROOT_DISK_TYPE = 'MACHINE_IMAGE_LIVE'

    _IDENTIFIER_KEY = 'identifier'
    _COUNT_KEY = 'count'
    _TYPE_KEY = 'type'
    _OWNER_KEY = 'owner'
    _VISIBILITY_KEY = 'visibility'

    _PDISK_SUPERUSER = '******'
    _DISK_UNAUTHORIZED_VISIBILITIES = ['PRIVATE']

    def __init__(self, args, **kwargs):
        self.diskSrc = None
        self.diskDstPath = None
        self.diskDstHost = None
        self.marketplaceEndpoint = None
        self.marketplaceImageId = None
        self.pdiskImageId = None
        self.pdiskSnapshotId = None
        self.downloadedLocalImageLocation = None
        self.downloadedLocalImageSize = 0
        self.vmOwner = None

        self._parseArgs(args)
        self._initFromConfig(kwargs.get('conf_filename', ''))
        self._initPdiskClient()
        self._initMarketplaceRelated()

        self.defaultSignalHandler = None

    def _initFromConfig(self, conf_filename=''):
        config = ConfigHolder.configFileToDictWithFormattedKeys(
            conf_filename or defaultConfigFile)
        options = PDiskEndpoint.options()
        self.configHolder = ConfigHolder(options, config)
        self.configHolder.set('pdiskEndpoint', self._createPdiskEndpoint())
        self.configHolder.set('verboseLevel', self.DEFAULT_VERBOSE_LEVEL)
        self.configHolder.assign(self)

    def _initPdiskClient(self):
        self.pdiskEndpoint = self._createPdiskEndpoint()
        self.pdiskLVMDevice = self.configHolder.persistentDiskLvmDevice
        self.configHolder.set('pdiskEndpoint', self.pdiskEndpoint)
        self.pdisk = VolumeManagerFactory.create(self.configHolder)

    def _initMarketplaceRelated(self):
        self._retrieveMarketplaceInfos()
        self._initManifestDownloader()

    def _initManifestDownloader(self):
        self.manifestDownloader = ManifestDownloader(self.configHolder)

    def run(self):
        self._retrieveDisk()

    def _checkArgs(self, args):
        if len(args) != self._ARGS_LEN:
            raise ValueError('Invalid number of arguments')

    def _parseArgs(self, args):
        self._checkArgs(args)
        dst = args[self._ARG_DST_POS]
        self.diskDstHost = self._getDiskHost(dst)
        self.diskDstPath = self._getDiskPath(dst)
        self.diskSrc = args[self._ARG_SRC_POS]

    # FIXME: duplicates should be pulled into common location
    def _createPdiskEndpoint(self):
        host = self.configHolder.persistentDiskIp
        port = self.configHolder.persistentDiskPort or self._PDISK_PORT
        path = self.configHolder.persistentDiskPath or ''
        path = path.strip('/')
        return 'https://%s:%s/%s' % (host, port, path)

    def _updatePDiskSrcUrlFromPublicToLocalIp(self):
        """When PDisk is running behind a proxy, KVMs usually can't connect to it
        on the public IP. Instead substitute the public IP with the local one.
        Substitution is only made if the pdisk URL points to the public IP of the
        PDisk (i.e., the source disk is located on this site).
        persistent_disk_public_base_url should be set in the configuration."""
        src_pdisk_hostname = Util.getHostnameFromUri(self.diskSrc)
        public_pdisk_hostname = Util.getHostnameFromUri(
            self.persistentDiskPublicBaseUrl) or self.persistentDiskIp

        if src_pdisk_hostname == public_pdisk_hostname:
            disk_src_parts = urlparse(self.diskSrc)
            (scheme, _, path, params, query, fragment) = disk_src_parts
            netloc = self.persistentDiskIp
            if disk_src_parts.port:
                netloc = netloc + ":" + str(disk_src_parts.port)
            self.diskSrc = urlunparse((scheme, netloc, path, params, query,
                                       fragment))

    def _retrieveDisk(self):
        if self.diskSrc.startswith('pdisk:'):
            self.diskSrc = self.diskSrc[len('pdisk:'):]  # strip prefix
            self._updatePDiskSrcUrlFromPublicToLocalIp()
            self._startFromPersisted()
        else:
            self._startFromCowSnapshot()

    def _startFromPersisted(self):
        diskId = self.diskSrc.rstrip('/').split('/').pop()
        diskType = self.pdisk.getValue('type', diskId)

        is_root_disk = self._getDiskIndex(self.diskDstPath) == 0
        if is_root_disk:
            self._checkBootDisk(diskId, diskType)
        elif diskType not in self._ACCEPTED_EXTRA_DISK_TYPE:
            raise ValueError('Only %s type disks can be attached as extra disks'
                             % ', '.join(self._ACCEPTED_EXTRA_DISK_TYPE))

        self._createDestinationDir()
        self._attachPDisk(self.diskSrc)
        self._incrementVolumeUserCount(diskId)

    def _incrementVolumeUserCount(self, diskId):
        user_count = self.pdisk.getVolumeUserCount(diskId)
        self.pdisk.updateVolume({self._COUNT_KEY: str(user_count + 1)}, diskId)

    def _checkBootDisk(self, diskId, diskType):
        is_live_machine_disk = diskType in self._ACCEPTED_ROOT_DISK_TYPE
        user_count = self.pdisk.getVolumeUserCount(diskId)

        if not is_live_machine_disk:
            raise Exception('Only a live persistent disk can be booted from.')
        if user_count != 0:
            raise Exception('User count must be zero on the live disk to boot from.')

    def _startFromCowSnapshot(self):
        if self._cacheMiss():
            # self._retrieveAndCachePDiskImage()
            self._remotelyCachePDiskImage()

        try:
            self._checkAuthorization()
            self._createPDiskSnapshot()
            self._setSnapshotOwner()
            self._createDestinationDir()
            self._attachPDisk(self._getPDiskSnapshotURL())
        except:
            self._deletePDiskSnapshot()
            raise

    # -------------------------------------------
    # Cache management and related
    # -------------------------------------------

    def _cacheMiss(self):
        foundIds = self._getPDiskImageIdsFromMarketplaceImageId()
        if len(foundIds) > 0:
            self.pdiskImageId = foundIds[0]
            return False
        return True

    def _createDestinationDir(self):
        dstDir = dirname(self.diskDstPath)
        self._sshDst(['mkdir', '-p', dstDir],
                     'Unable to create directory %s' % dstDir)

    def _downloadImage(self):
        imageLocations = self.manifestDownloader.getImageLocations()
        self._assertLength(imageLocations, 1, atLeast=True)
        imageMarketplaceLocation = imageLocations[0]
        imageName = self._getImageIdFromURI(imageMarketplaceLocation)
        pdiskTmpStore = self._getPDiskTempStore()
        self.downloadedLocalImageLocation = '%s/%s.%s' % (pdiskTmpStore,
                                                          int(time()),
                                                          imageName)
        self._sshPDisk(['curl', '-H', 'accept:application/x-gzip', '-L', '-o',
                        self.downloadedLocalImageLocation,
                        imageMarketplaceLocation],
                       'Unable to download "%s"' % imageMarketplaceLocation)

    def _checkDownloadedImageChecksum(self):
        hash_fun = self._CHECKSUM
        size_b, checksum = self._getDownloadedImageChecksum(hash_fun)
        self._validateImageSize(size_b)
        self._validateImageChecksum(checksum, hash_fun)

    def _getDownloadedImageChecksum(self, hash_fun):
        size_b, sums = Compressor.checksum_file(
            self.downloadedLocalImageLocation, [hash_fun])
        return size_b, sums[self._CHECKSUM]

    def _validateImageSize(self, size_b):
        image_size_b = self._getImageSize()
        # convert both to strings to avoid inequality because of type mismatch
        if str(size_b) != str(image_size_b):
            raise ValueError("Downloaded image size (%s) doesn't match size in image manifest (%s)"
                             % (size_b, image_size_b))

    def _validateImageChecksum(self, checksum, hash_fun):
        image_checksum = self._getImageChecksum(hash_fun)
        if checksum != image_checksum:
            raise ValueError('Invalid image checksum: got %s, defined %s'
                             % (checksum, image_checksum))

    def _getImageFormat(self):
        return self.manifestDownloader.getImageElementValue('format')

    def _getImageKind(self):
        return self.manifestDownloader.getImageElementValue('kind')

    def _getImageSize(self):
        return self.manifestDownloader.getImageElementValue('bytes')

    def _getImageChecksum(self, checksum):
        return self.manifestDownloader.getImageElementValue(checksum)

    def _deleteDownloadedImage(self):
        self._sshPDisk(['rm', '-f', self.downloadedLocalImageLocation],
                       'Unable to remove temporary image', True)

    # -------------------------------------------
    # Marketplace and related
    # -------------------------------------------

    def _retrieveMarketplaceInfos(self):
        # Marketplace URLs can start with either http OR https!
        if self.diskSrc.startswith(('http://', 'https://')):
            self.marketplaceEndpoint = self._getMarketplaceEndpointFromURI(self.diskSrc)
            self.marketplaceImageId = self._getImageIdFromURI(self.diskSrc)
        elif self.diskSrc.startswith('pdisk:'):
            # Ignore Marketplace if pdisk is used
            self.marketplaceEndpoint = None
            self.marketplaceImageId = None
        else:
            # Local marketplace
            self.marketplaceEndpoint = 'http://localhost'
            try:
                self.marketplaceEndpoint = self.configHolder.marketplaceEndpointLocal
            except:
                pass
            # SunStone adds '<hostname>:' to the image ID
            self.marketplaceImageId = self.diskSrc.rstrip('/').split('/').pop()

        if self.marketplaceEndpoint:
            self.configHolder.set('marketplaceEndpoint', self.marketplaceEndpoint)

    def _getMarketplaceEndpointFromURI(self, uri):
        matcher = re.match("^(.*)/metadata/.*$", uri)
        return matcher.group(1)

    def _getImageIdFromURI(self, uri):
        fragments = uri.rstrip('/').split('/')
        return fragments.pop()

    def _validateMarketplaceImagePolicy(self):
        try:
            policy = Policy(self.configHolder)
            policy.check(self.marketplaceImageId)
        except:
            raise Exception('Policy validation failed')

    def _buildFullyQualifiedMarketplaceImage(self, policyCheckResult, imagePos):
        selectedImage = policyCheckResult[imagePos]
        uri = '%s/metadata/%s/%s/%s' % (self.marketplaceEndpoint,
                                        selectedImage.identifier,
                                        selectedImage.endorser,
                                        selectedImage.created)
        return uri

    def _getPDiskImageIdsFromMarketplaceImageId(self):
        return self.pdisk.search(self._IDENTIFIER_KEY, self.marketplaceImageId)

    # -------------------------------------------
    # Persistent disk and related
    # -------------------------------------------

    def _attachPDisk(self, diskSrc):
        uuid = diskSrc.rstrip('/').split('/').pop()
        turl = self.pdisk.getTurl(uuid)
        disk_name = basename(self.diskDstPath)
        vm_id = self._retrieveInstanceId()
        vm_dir = dirname(dirname(dirname(self.diskDstPath)))
        self._sshDst(['/usr/sbin/stratus-pdisk-client.py',
                      '--pdisk-id', diskSrc,
                      '--vm-dir', vm_dir,
                      '--vm-id', str(vm_id),
                      '--vm-disk-name', disk_name,
                      '--turl', turl,
                      '--register', '--mark', '--attach', '--link', '--op', 'up'],
                     'Unable to attach persistent disk: %s, %s, %s, %s, %s'
                     % (diskSrc, vm_dir, str(vm_id), disk_name, turl))

    def _retrieveAndCachePDiskImage(self):
        self.manifestDownloader.downloadManifestByImageId(self.marketplaceImageId)
        self._validateMarketplaceImagePolicy()
        try:
            self._downloadImage()
            self._checkDownloadedImageChecksum()
            self._uploadDownloadedImageToPdisk()
        except:
            self._deletePDiskSnapshot()
            raise
        finally:
            try:
                self._deleteDownloadedImage()
            except:
                pass

    def _remotelyCachePDiskImage(self):
        """
        This function initializes a new persistent volume from a URL. The image
        contents are downloaded directly from the URL by the persistent disk
        service. The size (in bytes) and SHA-1 checksum are also validated.
        """
        self.manifestDownloader.downloadManifestByImageId(self.marketplaceImageId)
        self._validateMarketplaceImagePolicy()

        imageLocations = self.manifestDownloader.getImageLocations()
        self._assertLength(imageLocations, 1, atLeast=True)
        url = imageLocations[0]

        sizeInBytes = self._getImageSize()
        sha1 = self._getImageChecksum(self._CHECKSUM)

        gbBytes = 10 ** 9
        sizeInGB = long(sizeInBytes) / gbBytes
        if long(sizeInBytes) % gbBytes > 0:
            sizeInGB += 1

        self.pdiskImageId = self.pdisk.createVolumeFromUrl(
            sizeInGB, '', False, url, str(sizeInBytes), sha1)

        self._setNewPDiskImageOriginProperties()

    def _uploadDownloadedImageToPdisk(self):
        volume_url = self.pdisk.uploadVolume(self.downloadedLocalImageLocation)
        self.pdiskImageId = volume_url.rsplit('/', 1)[1]
        self._setNewPDiskImageOriginProperties()

    def _setNewPDiskImageOriginProperties(self):
        self._setPDiskInfo(self._IDENTIFIER_KEY, self.marketplaceImageId,
                           self.pdiskImageId)
        self._setPDiskInfo(self._TYPE_KEY, 'MACHINE_IMAGE_ORIGIN', self.pdiskImageId)

    def _getPDiskTempStore(self):
        store = self.configHolder.persistentDiskTempStore or '/tmp'
        self._sshDst(['mkdir', '-p', store], 'Unable to create temporary store')
        return store

    def _createPDiskSnapshot(self):
        snapshotIdentifier = 'snapshot:%s' % self.pdiskImageId
        self.pdiskSnapshotId = self.pdisk.createCowVolume(self.pdiskImageId, None)
        self._setPDiskIdentifier(snapshotIdentifier, self.pdiskSnapshotId)

    def _checkAuthorization(self):
        self.vmOwner = self._deriveVMOwner()
        disk_owner = self._getDiskOwner(self.pdiskImageId)
        disk_visibility = self._getDiskVisibility(self.pdiskImageId)
        if disk_owner not in [self.vmOwner, self._PDISK_SUPERUSER] and \
                disk_visibility in self._DISK_UNAUTHORIZED_VISIBILITIES:
            raise ValueError('User %s is not authorized to start image %s'
                             % (self.vmOwner, self.marketplaceImageId))

    def _setSnapshotOwner(self):
        if not self.vmOwner:
            raise ValueError('VM owner is not set.')
        self.pdisk.updateVolume({'owner': self.vmOwner}, self.pdiskSnapshotId)

    def _setPDiskInfo(self, key, value, pdiskId):
        self.pdisk.updateVolume({key: value}, pdiskId)

    def _setPDiskIdentifier(self, value, pdiskId):
        self.pdisk.updateVolume({self._IDENTIFIER_KEY: value}, pdiskId)

    def _getPDiskSnapshotURL(self):
        return '%s/%s' % (self.pdiskEndpoint, self.pdiskSnapshotId)

    def _deletePDiskSnapshot(self, *args, **kwargs):
        if self.pdiskSnapshotId is None:
            return
        try:
            # FIXME: why do we need to set credentials here?
            self.pdisk._setPDiskUserCredentials()
            self.pdisk.deleteVolume(self.pdiskSnapshotId)
        except:
            pass

    # -------------------------------------------
    # Utility
    # -------------------------------------------

    def _removeExtension(self, filename):
        return '.'.join(filename.split('.')[:-1])

    def _getVirtualSizeBytesFromQemu(self, qemuOutput):
        for line in qemuOutput.split('\n'):
            if line.lstrip().startswith('virtual'):
                bytesAndOtherThings = line.split('(')
                self._assertLength(bytesAndOtherThings)
                bytesAndUnit = bytesAndOtherThings[1].split(' ')
                self._assertLength(bytesAndUnit)
                return int(bytesAndUnit[0])
        raise ValueError('Unable to find image bytes size')

    def _getDiskPath(self, arg):
        return self._getStringPart(arg, 1)

    def _getDiskHost(self, arg):
        return self._getStringPart(arg, 0)

    def _getStringPart(self, arg, part, nbPart=2, delimiter=':'):
        path = arg.split(delimiter)
        self._assertLength(path, nbPart)
        return path[part]

    def _findNumbers(self, elems):
        findedNb = []
        for nb in elems:
            try:
                findedNb.append(int(nb))
            except Exception:
                pass
        return findedNb

    def _getDiskIndex(self, diskPath):
        try:
            return int(diskPath.split('.')[-1])
        except:
            raise ValueError('Unable to determine disk index')

    def _assertLength(self, elements, length=2, errorMsg=None, atLeast=False):
        nbElem = len(elements)
        if not errorMsg:
            errorMsg = 'Object should have a length of %s%s , got %s\n%s' % (
                length, atLeast and ' at least' or '', nbElem, str(elements))
        if not atLeast and nbElem != length or nbElem < length:
            raise ValueError(errorMsg)

    def _bytesToGiga(self, bytesAmount):
        return (bytesAmount / 1024 ** 3) + 1

    def _sshDst(self, cmd, errorMsg, dontRaiseOnError=False):
        retCode, output = sshCmdWithOutput(
            ' '.join(cmd), self.diskDstHost, user=getuser(),
            sshKey=sshPublicKeyLocation.replace('.pub', ''))
        if not dontRaiseOnError and retCode != 0:
            raise Exception('%s\n: Error: %s' % (errorMsg, output))
        return output

    def _sshPDisk(self, cmd, errorMsg, dontRaiseOnError=False):
        cmd_str = ' '.join(cmd)
        printStep("Executing: %s" % cmd_str)
        retCode, output = sshCmdWithOutput(
            cmd_str, self.pdisk.persistentDiskIp, user=getuser(),
            sshKey=self.pdisk.persistentDiskPrivateKey.replace('.pub', ''))
        if not dontRaiseOnError and retCode != 0:
            raise Exception('%s\n: Error: %s' % (errorMsg, output))
        return output

    def _getVMOwner(self, instanceId):
        credentials = LocalhostCredentialsConnector(self.configHolder)
        cloud = CloudConnectorFactory.getCloud(credentials)
        cloud.setEndpointFromParts('localhost', self.configHolder.onePort)
        return cloud.getVmOwner(instanceId)

    def _retrieveInstanceId(self):
        pathElems = self.diskDstPath.split('/')
        instanceId = self._findNumbers(pathElems)
        errorMsg = '%s instance ID in path. ' + 'Path is "%s"' % self.diskDstPath
        if len(instanceId) != 1:
            raise ValueError(errorMsg % ((len(instanceId) == 0) and 'Unable to find'
                                         or 'Too many candidates'))
        return instanceId.pop()

    def _deriveVMOwner(self):
        instanceId = self._retrieveInstanceId()
        owner = self._getVMOwner(instanceId)
        return owner

    def _getDiskOwner(self, pdiskImageId):
        return self.pdisk.getValue(self._OWNER_KEY, pdiskImageId)

    def _getDiskVisibility(self, pdiskImageId):
        return self.pdisk.getValue(self._VISIBILITY_KEY, pdiskImageId)
class StratuslabClientCloud(BaseCloudConnector):
    RUNINSTANCE_RETRY_TIMEOUT = 3

    cloudName = 'stratuslab'

    def __init__(self, slipstreamConfigHolder):
        super(StratuslabClientCloud, self).__init__(slipstreamConfigHolder)

        self.slConfigHolder = StratuslabConfigHolder(slipstreamConfigHolder.options,
                                                     slipstreamConfigHolder.config)
        self.listener = CreatorBaseListener(verbose=(self.verboseLevel > 1))

        self.setCapabilities(contextualization=True,
                             direct_ip_assignment=True,
                             orchestrator_can_kill_itself_or_its_vapp=True)

        patchStratuslab()

    def startImage(self, user_info, image_info):
        self._prepareMachineForBuildImage()

        self.slConfigHolder.set('marketplaceEndpoint',
                                user_info.get_cloud('marketplace.endpoint'))

        manifestDownloader = ManifestDownloader(self.slConfigHolder)

        imageId = self.getImageId(image_info)
        image_info['imageVersion'] = manifestDownloader.getImageVersion(imageId=imageId)

        self._updateStratuslabConfigHolderForBuildImage(user_info, image_info)

        self.creator = Creator(imageId, self.slConfigHolder)
        self.creator.setListener(self.listener)

        createImageTemplateDict = self.creator._getCreateImageTemplateDict()
        msgData = StratuslabClientCloud._getCreateImageTemplateMessaging(
            image_info, self._getCloudInstanceName())

        def ourCreateTemplateDict():
            createImageTemplateDict.update(msgData)
            return createImageTemplateDict

        self.creator._getCreateImageTemplateDict = ourCreateTemplateDict

        self.creator.createStep1()

        self.addVm(NodeDecorator.MACHINE_NAME, self.creator.runner)

        return self.getVmsDetails()

    def _buildImage(self, userInfo, imageInfo):
        # self.creator.create()
        self.creator.createStep2()

        # if messaging is set to 'pdisk', then try polling for the new image
        # identifier from the storage system; otherwise will just return empty
        # string
        self._newImageId = self._pollStorageForNewImage(self.slConfigHolder)

    def _pollStorageForNewImage(self, slConfigHolder):
        newImageId = ''

        msg_type = os.environ.get('SLIPSTREAM_MESSAGING_TYPE', None)
        msg_endpoint = os.environ.get('SLIPSTREAM_MESSAGING_ENDPOINT', None)

        if msg_type and msg_endpoint:
            if msg_type == 'pdisk':
                diid = os.environ.get('SLIPSTREAM_DIID', None)
                if diid:
                    tag = "SlipStream-%s" % diid
                    filters = {'tag': [tag, ]}

                    slConfigHolder.set('pdiskEndpoint', msg_endpoint)
                    pdisk = VolumeManagerFactory.create(slConfigHolder)

                    print >> sys.stdout, "Searching on %s for disk with tag %s." \
                        % (msg_endpoint, tag)
                    sys.stdout.flush()

                    # hardcoded polling for 30' at 1' intervals
                    for i in range(30):
                        print >> sys.stdout, "Search iteration %d" % i
                        sys.stdout.flush()
                        volumes = pdisk.describeVolumes(filters)
                        if len(volumes) > 0:
                            try:
                                newImageId = volumes[0]['identifier']
                            except Exception as e:
                                print "Exception occurred looking for volume: %s" % e
                                pass
                            break
                        time.sleep(60)

        print "Returning new image ID value: %s" % newImageId
        return newImageId

    @staticmethod
    def _getCreateImageTemplateMessaging(imageInfo, cloud_instance_name):
        msg_type = os.environ.get('SLIPSTREAM_MESSAGING_TYPE', None)
        if msg_type:
            imageResourceUri = BaseCloudConnector.getResourceUri(imageInfo) + \
                '/' + cloud_instance_name
            message = StratuslabClientCloud._getCreateImageMessagingMessage(
                imageResourceUri)
            msgData = {Runner.CREATE_IMAGE_KEY_MSG_TYPE: msg_type,
                       Runner.CREATE_IMAGE_KEY_MSG_ENDPOINT:
                           os.environ['SLIPSTREAM_MESSAGING_ENDPOINT'],
                       Runner.CREATE_IMAGE_KEY_MSG_MESSAGE: message}
            if msg_type in ('amazonsqs', 'dirq'):
                msgData.update({Runner.CREATE_IMAGE_KEY_MSG_QUEUE:
                                os.environ['SLIPSTREAM_MESSAGING_QUEUE']})
            elif msg_type == 'rest':
                msgData.update({Runner.CREATE_IMAGE_KEY_MSG_QUEUE: imageResourceUri})
            elif msg_type == 'pdisk':
                msgData = {}
            else:
                raise Exceptions.ExecutionException('Unsupported messaging type: %s'
                                                    % msg_type)
        else:
            msgData = {}
        return msgData

    @staticmethod
    def _getCreateImageMessagingMessage(imageResourceUri):
        return base64.b64encode('{"uri":"%s", "imageid":""}' % imageResourceUri)

    def initialization(self, user_info):
        self.slConfigHolder.options.update(Runner.defaultRunOptions())
        self._setUserInfoOnStratuslabConfigHolder(user_info)

    def _startImage(self, user_info, image_info, instance_name,
                    cloudSpecificData=None):
        configHolder = self.slConfigHolder.deepcopy()

        self._setInstanceParamsOnConfigHolder(configHolder, image_info)

        imageId = self.getImageId(image_info)

        self._setExtraContextDataOnConfigHolder(configHolder, cloudSpecificData)
        self._setVmNameOnConfigHolder(configHolder, instance_name)

        runner = self._runInstance(imageId, configHolder)
        return runner

    def _getCloudSpecificData(self, node_info, node_number, nodename):
        return nodename

    def vmGetIp(self, runner):
        return runner.instancesDetail[0]['ip']

    def vmGetId(self, runner):
        return runner.instancesDetail[0]['id']

    def _setInstanceParamsOnConfigHolder(self, configHolder, image):
        self._setInstanceSizeOnConfigHolder(configHolder, image)
        self._setExtraDisksOnConfigHolder(configHolder, image)
        self._setNetworkTypeOnConfigHolder(configHolder, image)

    def _setInstanceSizeOnConfigHolder(self, configHolder, image):
        self._setInstanceTypeOnConfigHolder(configHolder, image)
        self._setCpuRamOnConfigHolder(configHolder, image)

    def _setInstanceTypeOnConfigHolder(self, configHolder, image):
        configHolder.instanceType = self._getInstanceType(image)

    def _setCpuRamOnConfigHolder(self, configHolder, image):
        configHolder.vmCpu = self._getImageCpu(image) or None
        vmRamGb = self._getImageRam(image) or None
        if vmRamGb:
            try:
                # StratusLab needs value in MB
                configHolder.vmRam = str(int(vmRamGb.strip()) * 1024)
            except:
                pass

    def _setExtraDisksOnConfigHolder(self, configHolder, image):
        extra_disks = self.getExtraDisks(image)
        # 'extra_disk_volatile' is given in GB - 'extraDiskSize' needs to be in MB
        configHolder.extraDiskSize = int(extra_disks.get('extra.disk.volatile', 0) or 0) * 1024
        configHolder.persistentDiskUUID = extra_disks.get('extra_disk_persistent', '')
        configHolder.readonlyDiskId = extra_disks.get('extra_disk_readonly', '')

    def _setExtraContextDataOnConfigHolder(self, configHolder, nodename):
        configHolder.extraContextData = '#'.join(
            ['%s=%s' % (k, v) for (k, v) in os.environ.items()
             if k.startswith('SLIPSTREAM_')])
        configHolder.extraContextData += '#SLIPSTREAM_NODENAME=%s' % nodename
        configHolder.extraContextData += \
            '#SCRIPT_EXEC=%s' % self._buildSlipStreamBootstrapCommand(nodename)

    def _setVmNameOnConfigHolder(self, configHolder, nodename):
        configHolder.vmName = nodename

    def _runInstance(self, imageId, configHolder, max_attempts=3):
        if max_attempts <= 0:
            max_attempts = 1
        attempt = 1
        while True:
            try:
                runner = self._doRunInstance(imageId, configHolder)
            except socket.error, ex:
                if attempt >= max_attempts:
                    raise Exceptions.ExecutionException(
                        "Failed to launch instance after %i attempts: %s"
                        % (attempt, str(ex)))
                time.sleep(self.RUNINSTANCE_RETRY_TIMEOUT)
                attempt += 1
            else:
                return runner
class TMQuarantine(object):
    """Quarantine the files for a terminated virtual machine"""

    # Debug option
    PRINT_TRACE_ON_ERROR = True
    DEFAULT_VERBOSE_LEVEL = 0

    # Position of the provided args
    _ARG_SRC_POS = 1

    _PDISK_PORT = 8445

    def __init__(self, args, **kwargs):
        self.args = args

        self.diskSrcPath = None
        self.diskSrcHost = None
        self.vmDir = None
        self.diskName = None

        self.pdiskHostPort = None
        self.snapshotMarketplaceId = None
        self.targetMarketplace = None
        self.createdPDiskId = None

        self.p12cert = ''
        self.p12pswd = None

        self.pdiskEndpoint = None
        self.pdiskPath = None
        self.pdiskPathNew = None
        self.originImageIdUrl = None
        self.originImageId = None
        self.originMarketPlace = None
        self.instanceId = None

        self.cloud = None
        self.rootVolumeUuid = None

        self.persistentDiskIp = None
        self.persistentDiskLvmDevice = None

        self._initFromConfig(kwargs.get('conf_filename', ''))
        self._initCloudConnector()

    def run(self):
        try:
            self._run()
        finally:
            self._cleanup()

    def _run(self):
        self._checkArgs()
        self._parseArgs()
        self._retrieveInstanceId()
        self._retrieveVmDir()
        self._retrieveAttachedVolumeInfo()
        self._detachAllVolumes()
        self._changeOwnerOfSnapshotVolume()
        self._moveFilesToQuarantine()

    def _initFromConfig(self, conf_filename=''):
        config = ConfigHolder.configFileToDictWithFormattedKeys(
            conf_filename or defaultConfigFile)
        options = PDiskEndpoint.options()
        self.configHolder = ConfigHolder(options, config)
        self.configHolder.set('pdiskEndpoint', self._createPdiskEndpoint())
        self.configHolder.set('verboseLevel', self.DEFAULT_VERBOSE_LEVEL)
        self.configHolder.assign(self)

    def _initCloudConnector(self):
        credentials = LocalhostCredentialsConnector(self.configHolder)
        self.cloud = CloudConnectorFactory.getCloud(credentials)
        self.cloud.setEndpointFromParts('localhost', self.configHolder.onePort)

    def _checkArgs(self):
        if len(self.args) != 2:
            raise ValueError('Invalid number of arguments')

    def _parseArgs(self):
        src = self.args[self._ARG_SRC_POS]
        self.diskSrcPath = self._getDiskPath(src)
        self.diskSrcHost = self._getDiskHost(src)

    # FIXME: duplicates should be pulled into common location
    def _createPdiskEndpoint(self):
        host = self.configHolder.persistentDiskIp
        port = self.configHolder.persistentDiskPort or self._PDISK_PORT
        path = self.configHolder.persistentDiskPath or ''
        path = path.strip('/')
        return 'https://%s:%s/%s' % (host, port, path)

    def _changeOwnerOfSnapshotVolume(self):
        pdisk = VolumeManagerFactory.create(self.configHolder)
        # root volume may not exist, if this is an image creation
        # only actually change ownership of snapshot volumes
        if self.rootVolumeUuid:
            disk_identifier = pdisk.getValue('identifier', self.rootVolumeUuid)
            if re.match('.*snapshot.*', disk_identifier):
                pdisk.quarantineVolume(self.rootVolumeUuid)

    def _moveFilesToQuarantine(self):
        instance_dir = os.path.join(self.vmDir, str(self.instanceId))
        quarantine_dir = os.path.join(self.vmDir, 'quarantine')

        self._moveFilesToQuarantineLocal(instance_dir, quarantine_dir)
        self._moveFilesToQuarantineHypervisor(instance_dir, quarantine_dir)

    def _moveFilesToQuarantineLocal(self, instance_dir, quarantine_dir):
        shutil.move(instance_dir, quarantine_dir)

    def _moveFilesToQuarantineHypervisor(self, instance_dir, quarantine_dir):
        # If the storage area is on NFS and shared between the server
        # and the hypervisor, this will always fail. Simply try to do
        # this and ignore errors if they arise, hoping that the error is
        # because the quarantine has already been done.
        self._sshDst(['mv', instance_dir, quarantine_dir],
                     'Failed to quarantine VM on hypervisor.', True)

    # --------------------------------------------
    # Persistent disk and related
    # --------------------------------------------

    def _retrieveAttachedVolumeInfo(self):
        uris = self._getAttachedVolumeURIs()
        self.attachedVolumeURIs = uris

    def _getAttachedVolumeURIs(self):
        register_filename_contents = self._sshDst(
            ['/usr/sbin/stratus-list-registered-volumes.py',
             '--vm-id', str(self.instanceId)],
            'Unable to get registered volumes')
        return self._sanitizeVolumeURIs(register_filename_contents.splitlines())

    def _sanitizeVolumeURIs(self, volume_uris):
        "Filtering assumes that the disk's name is UUID."
        return filter(lambda x: is_uuid(self._getDiskNameFromURI(x.strip())),
                      volume_uris)

    def _getDiskNameFromURI(self, uri):
        return uri.strip().strip('/').split('/').pop()

    def _getPDiskHostPortFromURI(self, uri):
        splittedUri = uri.split(':')
        self._assertLength(splittedUri, 4)
        return ':'.join(splittedUri[1:3])

    def _detachAllVolumes(self):
        pdisk = VolumeManagerFactory.create(self.configHolder)
        msg = ''
        self.rootVolumeUuid = None
        for pdisk_uri in self.attachedVolumeURIs:
            pdisk_uri = pdisk_uri.strip()
            if pdisk_uri:
                # saves the root volume uuid so that the ownership can be changed later
                if not self.rootVolumeUuid:
                    self.rootVolumeUuid = self._getDiskNameFromURI(pdisk_uri)
                try:
                    self._detachSingleVolume(pdisk, pdisk_uri)
                except Exception as e:
                    msg += str(e) + "\n"
        if msg:
            raise Exception(msg)

    def _detachSingleVolume(self, pdisk, pdisk_uri):
        uuid = self._getDiskNameFromURI(pdisk_uri)
        turl = pdisk.getTurl(uuid)
        self._sshDst(['/usr/sbin/stratus-pdisk-client.py',
                      '--pdisk-id', pdisk_uri,
                      '--vm-id', str(self.instanceId),
                      '--turl', turl,
                      '--register', '--mark', '--attach', '--op', 'down'],
                     'Unable to detach pdisk "%s with TURL %s on VM %s"'
                     % (pdisk_uri, turl, str(self.instanceId)))

    # --------------------------------------------
    # Utility
    # --------------------------------------------

    def _assertLength(self, elem, size):
        if len(elem) != size:
            raise ValueError('List should have %s element(s), got %s'
                             % (size, len(elem)))

    def _getDiskPath(self, arg):
        return self._getStringPart(arg, 1)

    def _getDiskHost(self, arg):
        return self._getStringPart(arg, 0)

    def _findNumbers(self, elems):
        findedNb = []
        for nb in elems:
            try:
                findedNb.append(int(nb))
            except Exception:
                pass
        return findedNb

    def _getStringPart(self, arg, part, nbPart=2, delimiter=':'):
        path = arg.split(delimiter)
        self._assertLength(path, nbPart)
        return path[part]

    def _retrieveInstanceId(self):
        pathElems = self.diskSrcPath.split('/')
        instanceId = self._findNumbers(pathElems)
        errorMsg = '%s instance ID in path. ' + 'Path is "%s"' % self.diskSrcPath
        if len(instanceId) != 1:
            raise ValueError(errorMsg % ((len(instanceId) == 0) and 'Unable to find'
                                         or 'Too many candidates'))
        self.instanceId = instanceId.pop()

    def _retrieveVmDir(self):
        self.vmDir = dirname(dirname(self.diskSrcPath))

    def _sshDst(self, cmd, errorMsg, dontRaiseOnError=False):
        return self._ssh(self.diskSrcHost, cmd, errorMsg, dontRaiseOnError)

    def _ssh(self, host, cmd, errorMsg, dontRaiseOnError=False):
        retCode, output = sshCmdWithOutput(
            ' '.join(cmd), host, user=getuser(),
            sshKey=sshPublicKeyLocation.replace('.pub', ''))
        if not dontRaiseOnError and retCode != 0:
            raise Exception('%s\n: Error: %s' % (errorMsg, output))
        return output

    def _cleanup(self):
        pass