def teardown_images(self):
    """
    Scan all the images and volumes on the hosted-engine storage
    domain and call teardownImage on VDSM for each volume.

    teardownImage removes the related symlinks and deactivates the LV
    when the domain is on block devices.
    """
    self._log.info("Teardown images")
    cli = util.connect_vdsm_json_rpc(
        logger=self._log,
        timeout=constants.VDSCLI_SSL_TIMEOUT,
    )
    for img_uuid in self.get_images_list(cli):
        vol_response = cli.getVolumesList(
            imageID=img_uuid,
            storagepoolID=self._spUUID,
            storagedomainID=self._sdUUID,
        )
        self._log.debug(vol_response)
        if vol_response['status']['code'] != 0:
            self._log.error(
                'Error fetching volumes list: {msg}'.format(
                    msg=vol_response['status']['message'],
                )
            )
            continue
        for vol_uuid in vol_response['items']:
            self._log.debug(
                "Teardown image {storagepoolID} {storagedomainID} "
                "{imageID} {volumeID}".format(
                    storagepoolID=self._spUUID,
                    storagedomainID=self._sdUUID,
                    imageID=img_uuid,
                    volumeID=vol_uuid,
                )
            )
            status = cli.teardownImage(
                storagepoolID=self._spUUID,
                storagedomainID=self._sdUUID,
                imageID=img_uuid,
                volumeID=vol_uuid,
            )
            self._log.debug('Status: {status}'.format(status=status))
            if status['status']['code'] != 0:
                self._log.error(
                    (
                        'Error teardown image - sp_uuid: {spuuid} - '
                        'sd_uuid: {sduuid} - '
                        'img_uuid: {imguuid} - '
                        'vol_uuid: {voluuid}: {message}'
                    ).format(
                        spuuid=self._spUUID,
                        sduuid=self._sdUUID,
                        imguuid=img_uuid,
                        voluuid=vol_uuid,
                        message=status['status']['message'],
                    )
                )
def calculate_load(self):
    """
    Compute host CPU load excluding the share used by the engine VM.

    Uses the jiffy counters sampled in self.system['prev'] and
    self.system['cur'], queries VDSM for the engine VM CPU stats and
    stores the resulting non-engine load fraction in self.load.
    """
    dtotal = self.system['cur'].total - self.system['prev'].total
    dbusy = self.system['cur'].busy - self.system['prev'].busy
    # BUG FIX: guard against a zero sampling interval (two identical
    # samples) which would raise ZeroDivisionError.
    load = dbusy / float(dtotal) if dtotal else 0.0
    cli = util.connect_vdsm_json_rpc(logger=self._log)
    engine_load = 0.0
    try:
        stats = cli.VM.getStats(vmID=self._vm_uuid)[0]
        vm_cpu_total = float(stats["cpuUser"]) + float(stats["cpuSys"])
        cpu_count = multiprocessing.cpu_count()
        # cpuUser/cpuSys are per-core percentages; normalize to a
        # 0..1 fraction of the whole host.
        engine_load = (vm_cpu_total / cpu_count) / 100.0
    except ServerError as e:
        if e.code == vdsm_exception.NoSuchVM.code:
            self._log.info("VM not on this host",
                           extra=log_filter.lf_args('vm', 60))
        else:
            self._log.error(e, extra=log_filter.lf_args('vm', 60))
    except KeyError:
        self._log.info(
            "VM stats do not contain cpu usage. VM might be down.",
            extra=log_filter.lf_args('vm', 60))
    except ValueError as e:
        self._log.error("Error getting cpuUser: %s", str(e))
    load_no_engine = max(load - engine_load, 0.0)
    self._log.info(
        "System load"
        " total={0:.4f}, engine={1:.4f}, non-engine={2:.4f}".format(
            load, engine_load, load_no_engine))
    self.load = load_no_engine
def setVmTicket(args):
    """Set a VNC password/ticket on the engine VM and print how to connect."""
    cli = ohautil.connect_vdsm_json_rpc()
    cli.VM.updateDevice(
        vmID=args.vmid,
        params={
            'deviceType': 'graphics',
            'existingConnAction': 'keep',
            'graphicsType': 'vnc',
            'params': {},
            'ttl': args.ttl,
            'password': args.password,
        },
    )
    vmstats = cli.VM.getStats(vmID=args.vmid)
    vnc_displays = [
        d for d in vmstats[0]['displayInfo'] if d['type'] == 'vnc'
    ]
    if vnc_displays:
        print(
            (
                "You can now connect the hosted-engine VM with VNC at "
                "{ip}:{port}"
            ).format(
                ip=vnc_displays[0]['ipAddress'],
                port=vnc_displays[0]['port'],
            )
        )
def _late_setup(self):
    """Ensure vdsmd is running (customization checks need it), then connect."""
    vdsmd = self.environment[ohostedcons.VDSMEnv.VDSMD_SERVICE]
    if not self.services.status(name=vdsmd):
        rc, _stdout, _stderr = self.execute(
            (self.command.get('vdsm-tool'), 'configure', '--force'),
            raiseOnError=False,
        )
        if rc != 0:
            raise RuntimeError(_('Failed to reconfigure libvirt for VDSM'))
        if not self.services.supportsDependency:
            # Init systems lacking dependency support need prerequisite
            # services started explicitly.
            for svc in ('cgconfig', 'messagebus', 'libvirtd'):
                if self.services.exists(svc):
                    self.services.state(svc, True)
        self.services.state(name=vdsmd, state=True)
    self.environment[
        ohostedcons.VDSMEnv.VDS_CLI] = ohautil.connect_vdsm_json_rpc(
        logger=self.logger,
        timeout=ohostedcons.Const.VDSCLI_SSL_TIMEOUT,
    )
def disconnect_storage_server(self, timeout=constants.VDSCLI_SSL_TIMEOUT):
    """
    Disconnect the hosted-engine domain storage server.

    :param timeout: timeout for the VDSM json-rpc connection
    :raises RuntimeError: if VDSM fails to disconnect
    """
    self._log.info("Disconnecting storage server")
    cli = util.connect_vdsm_json_rpc(logger=self._log, timeout=timeout)
    # normalize_path=False: disconnect exactly from where we connected,
    # even if the recorded path was wrong.
    con_list, storage_type = self._get_conlist(cli, normalize_path=False)
    if not con_list:
        return
    try:
        status = cli.StoragePool.disconnectStorageServer(
            storagepoolID=self._spUUID,
            domainType=storage_type,
            connectionParams=con_list,
        )
        self._log.debug(status)
    except ServerError as e:
        raise RuntimeError(
            (
                'Disconnection to storage server failed, unable '
                'to recover: {message} - Please try rebooting the '
                'host to reach a consistent status'
            ).format(message=str(e))
        )
def action(self, options):
    """Check whether the configured bridge exists and has ports attached."""
    cli = util.connect_vdsm_json_rpc(logger=self._log)
    try:
        caps = cli.Host.getCapabilities()
    except ServerError as e:
        self._log.error(e)
        self.update_result(None)
        return
    if 'bridges' not in caps:
        self._log.error("Failed to getVdsCapabilities: "
                        "No 'bridges' in result")
        self.update_result(None)
        return
    bridges = caps['bridges']
    if self._bridge not in bridges:
        self._log.info("Bridge %s not found", self._bridge,
                       extra=log_filter.lf_args('status', 60))
        self.update_result(False)
        return
    if 'ports' in bridges[self._bridge]:
        self._log.info("Found bridge %s with ports", self._bridge,
                       extra=log_filter.lf_args('status', 60))
        self.update_result(True)
    else:
        self._log.info("Found bridge %s with no ports", self._bridge,
                       extra=log_filter.lf_args('status', 60))
        self.update_result(False)
def disconnect_storage_server(self, timeout=constants.VDSCLI_SSL_TIMEOUT):
    """
    Disconnect the hosted-engine domain storage server.
    """
    self._log.info("Disconnecting storage server")
    cli = util.connect_vdsm_json_rpc(
        logger=self._log,
        timeout=timeout,
    )
    # Keep normalize_path=False so we disconnect from the very same
    # location we connected to, even if that path was wrong.
    connections, dom_type = self._get_conlist(cli, normalize_path=False)
    if connections:
        try:
            result = cli.StoragePool.disconnectStorageServer(
                storagepoolID=self._spUUID,
                domainType=dom_type,
                connectionParams=connections,
            )
            self._log.debug(result)
        except ServerError as e:
            raise RuntimeError(
                ('Disconnection to storage server failed, unable '
                 'to recover: {message} - Please try rebooting the '
                 'host to reach a consistent status').format(
                    message=str(e)))
def get_volume_path(sp_uuid, sd_uuid, img_uuid, vol_uuid):
    """
    Return the local path of the volume file inside the domain.

    :param sp_uuid: StoragePool UUID
    :param sd_uuid: StorageDomain UUID
    :param img_uuid: Image UUID
    :param vol_uuid: Volume UUID
    :returns: The local path of the required volume
    :raises RuntimeError: when the volume cannot be prepared via VDSM
    """
    candidate = os.path.join(
        envconst.SD_RUN_DIR, sd_uuid, img_uuid, vol_uuid)
    if os.path.exists(candidate):
        return candidate
    # Not prepared yet: ask VDSM to prepare the image and hand us the path.
    try:
        cli = connect_vdsm_json_rpc()
        return cli.Image.prepare(
            storagepoolID=sp_uuid,
            storagedomainID=sd_uuid,
            imageID=img_uuid,
            volumeID=vol_uuid,
        )
    except ServerError as e:
        raise RuntimeError(
            'Path to volume {vol_uuid} not found in {root}.'
            ' Caused by: {err}'.format(
                vol_uuid=vol_uuid,
                root=envconst.SD_RUN_DIR,
                err=e,
            )
        )
def shutdown(args):
    """Ask VDSM to gracefully shut down the VM identified by args.vmid."""
    ohautil.connect_vdsm_json_rpc().VM.shutdown(
        vmID=args.vmid,
        delay=args.delay,
        message=args.message,
    )
def action(self, options):
    """Report whether the configured bridge is present and in 'up' state."""
    cli = util.connect_vdsm_json_rpc(logger=self._log)
    try:
        stats = cli.Host.getStats()
    except ServerError as e:
        self._log.error(e)
        self.update_result(None)
        return
    if 'network' not in stats:
        self._log.error("Failed to getVdsStats: "
                        "No 'network' in result")
        self.update_result(None)
        return
    networks = stats['network']
    if self._bridge not in networks:
        self._log.info("Bridge %s not found", self._bridge,
                       extra=log_filter.lf_args('status', 60))
        self.update_result(False)
        return
    is_up = networks[self._bridge].get('state') == 'up'
    if is_up:
        self._log.info("Found bridge %s in up state", self._bridge,
                       extra=log_filter.lf_args('status', 60))
    else:
        self._log.info("Found bridge %s not in up state", self._bridge,
                       extra=log_filter.lf_args('status', 60))
    self.update_result(is_up)
def _boot_from_hd(self):
    """Start the engine VM from disk, reconnecting to VDSM on socket errors."""
    # Temporarily attach the cloud-init no-cloud iso when requested.
    if (
        self.environment[ohostedcons.VMEnv.BOOT] == 'disk' and
        self.environment[ohostedcons.VMEnv.CDROM]
    ):
        self.environment[ohostedcons.VMEnv.SUBST]['@CDROM@'] = (
            self.environment[ohostedcons.VMEnv.CDROM]
        )
    while True:
        try:
            self._create_vm()
            break
        except socket.error as e:
            self.logger.debug(
                'Error talking with VDSM (%s), reconnecting.' % str(e),
                exc_info=True
            )
            self.environment[
                ohostedcons.VDSMEnv.VDS_CLI
            ] = ohautil.connect_vdsm_json_rpc(
                logger=self.logger,
                timeout=ohostedcons.Const.VDSCLI_SSL_TIMEOUT,
            )
def shutdown(args):
    """Request a graceful shutdown of the VM given on the command line."""
    cli = ohautil.connect_vdsm_json_rpc()
    cli.VM.shutdown(
        vmID=args.vmid,
        message=args.message,
        delay=args.delay,
    )
def setVmTicket(args):
    """Install a VNC ticket on the engine VM and print connection details."""
    cli = ohautil.connect_vdsm_json_rpc()
    graphics_params = {
        'deviceType': 'graphics',
        'existingConnAction': 'keep',
        'graphicsType': 'vnc',
        'params': {},
        'ttl': args.ttl,
        'password': args.password,
    }
    cli.VM.updateDevice(vmID=args.vmid, params=graphics_params)
    vmstats = cli.VM.getStats(vmID=args.vmid)
    # Only the first vnc display (if any) is reported, as before.
    for display in vmstats[0]['displayInfo']:
        if display['type'] == 'vnc':
            print(
                (
                    "You can now connect the hosted-engine VM with VNC at "
                    "{ip}:{port}"
                ).format(ip=display['ipAddress'], port=display['port'])
            )
            break
def _closeup(self):
    """Reconnect to VDSM: host-deploy restarted it while adding the host."""
    self.environment[ohostedcons.VDSMEnv.VDS_CLI] = (
        ohautil.connect_vdsm_json_rpc(
            logger=self.logger,
            timeout=ohostedcons.Const.VDSCLI_SSL_TIMEOUT,
        )
    )
def _setup(self):
    """
    Confirm the engine VM rollback with the user and prepare the env.

    Prompts (unless preseeded) for confirmation to restore the engine
    VM image from a backup taken during an upgrade attempt, then
    connects to VDSM and detects ovirt-node setups.
    """
    self.dialog.note(
        _(
            'During customization use CTRL-D to abort.'
        )
    )
    # Only prompt when no answer was preseeded in the environment.
    interactive = self.environment[
        ohostedcons.CoreEnv.ROLLBACK_PROCEED
    ] is None
    if interactive:
        self.environment[
            ohostedcons.CoreEnv.ROLLBACK_PROCEED
        ] = self.dialog.queryString(
            name=ohostedcons.Confirms.ROLLBACK_PROCEED,
            note=ohostedutil.readmeFileContent(
                ohostedcons.FileLocations.README_ROLLBACK
            ) + _(
                'Continuing will rollback the engine VM from a previous '
                'upgrade attempt.\n'
                'This procedure will restore an engine VM image '
                'from a backup taken during an upgrade attempt.\n'
                'The result of any action occurred after the backup '
                'creation instant could be definitively lost.\n'
                'Are you sure you want to continue? '
                '(@VALUES@)[@DEFAULT@]: '
            ),
            # TODO: point to our site for troubleshooting info...
            prompt=True,
            validValues=(_('Yes'), _('No')),
            caseSensitive=False,
            default=_('Yes')
        ) == _('Yes').lower()
    if not self.environment[ohostedcons.CoreEnv.ROLLBACK_PROCEED]:
        raise otopicontext.Abort('Aborted by user')
    self.environment[
        ohostedcons.CoreEnv.ROLLBACK_UPGRADE
    ] = True
    self.environment[
        ohostedcons.VDSMEnv.VDS_CLI
    ] = ohautil.connect_vdsm_json_rpc(
        logger=self.logger,
        timeout=ohostedcons.Const.VDSCLI_SSL_TIMEOUT,
    )
    self.environment.setdefault(
        ohostedcons.CoreEnv.REQUIREMENTS_CHECK_ENABLED,
        True
    )
    try:
        # avoid: pyflakes 'Config' imported but unused error
        import ovirt.node.utils.fs
        if hasattr(ovirt.node.utils.fs, 'Config'):
            # Running on ovirt-node: enable persisting file configuration.
            self.environment[ohostedcons.CoreEnv.NODE_SETUP] = True
    except ImportError:
        self.logger.debug('Disabling persisting file configuration')
def _closeup(self):
    # host-deploy restarted vdsm while adding the host, so the old
    # connection is stale: establish a fresh one.
    cli = ohautil.connect_vdsm_json_rpc(
        logger=self.logger,
        timeout=ohostedcons.Const.VDSCLI_SSL_TIMEOUT,
    )
    self.environment[ohostedcons.VDSMEnv.VDS_CLI] = cli
def setVmTicket(args):
    """Set a console ticket (password + ttl) on the engine VM via VDSM."""
    ohautil.connect_vdsm_json_rpc().VM.setTicket(
        vmID=args.vmid,
        password=args.password,
        ttl=args.ttl,
        existingConnAction='keep',
        params={},
    )
def connect_storage_server(self, timeout=constants.VDSCLI_SSL_TIMEOUT):
    """
    Connect the hosted-engine domain storage server.

    :param timeout: timeout for the VDSM json-rpc connection
    :raises RuntimeError: if no connection path could be activated
    """
    self._log.info("Connecting storage server")
    cli = util.connect_vdsm_json_rpc(
        logger=self._log,
        timeout=timeout,
    )
    con_list, storage_type = self._get_conlist(cli, normalize_path=True)
    if con_list:
        self._log.info("Connecting storage server")
        try:
            connections = cli.StoragePool.connectStorageServer(
                storagepoolID=self._spUUID,
                domainType=storage_type,
                connectionParams=con_list,
            )
            self._log.debug(connections)
        except ServerError as e:
            raise RuntimeError(
                'Connection to storage server failed: %s' % str(e)
            )
        connected = False
        for con in connections:
            if con['status'] == 0:
                connected = True
                continue
            # A multipath setup can survive a single failed path;
            # warn about it with as much detail as we can find.
            if len(connections) > 1:
                con_details = {}
                for ce in con_list:
                    if con['id'] == ce['id']:
                        con_details = ce
                self._log.warning(
                    (
                        'A connection path to the storage server is '
                        'not active, details: {con_details}'
                    ).format(
                        con_details=con_details,
                    )
                )
        if not connected:
            raise RuntimeError(
                'Connection to storage server failed'
            )
    self._log.info("Refreshing the storage domain")
    # calling getStorageDomainStats has the side effect of
    # causing a Storage Domain refresh including
    # all its tree under /rhev/data-center/...
    try:
        cli.StorageDomain.getStats(storagedomainID=self._sdUUID)
    except ServerError as e:
        self._log.debug("Error refreshing storage domain: %s", str(e))
def create(args):
    """Start the engine VM from a vm.conf file via VDSM."""
    vm_params = vmconf.parseVmConfFile(args.filename)
    # When a libvirt domain xml is available send only that one.
    xml = vm_params.get('xml')
    payload = {'xml': xml} if xml is not None else vm_params
    cli = ohautil.connect_vdsm_json_rpc()
    cli.VM.create(vmID=vm_params['vmId'], vmParams=payload)
def _misc(self):
    """Run vdsm-tool configure and restart vdsmd so it reloads the config."""
    service = self.environment[ohostedcons.VDSMEnv.VDSMD_SERVICE]
    self.logger.info(_('Starting {service}').format(service=service))
    self.services.startup(name=service, state=True)
    # Stop, reconfigure and start again so vdsmd reloads everything.
    self.services.state(name=service, state=False)
    rc, _stdout, _stderr = self.execute(
        (self.command.get('vdsm-tool'), 'configure', '--force'),
        raiseOnError=False,
    )
    if rc != 0:
        raise RuntimeError(
            _(
                'Failed to reconfigure libvirt for VDSM'
            )
        )
    if not self.services.supportsDependency:
        # Bring up prerequisite services by hand on init systems
        # without dependency support.
        for dep in ('cgconfig', 'messagebus', 'libvirtd'):
            if self.services.exists(dep):
                self.services.state(dep, True)
    self.services.state(name=service, state=True)
    self.environment[
        ohostedcons.VDSMEnv.VDS_CLI] = ohautil.connect_vdsm_json_rpc(
        logger=self.logger,
        timeout=ohostedcons.Const.VDSCLI_SSL_TIMEOUT,
    )
def _setup(self):
    """
    Confirm the engine VM upgrade with the user and prepare the env.

    Prompts (unless preseeded) for confirmation to deploy a new el7
    appliance over the existing engine VM disk, then connects to VDSM
    and detects ovirt-node setups.
    """
    self.dialog.note(_('During customization use CTRL-D to abort.'))
    # Only prompt when no answer was preseeded in the environment.
    interactive = self.environment[
        ohostedcons.CoreEnv.UPGRADE_PROCEED] is None
    if interactive:
        self.environment[
            ohostedcons.CoreEnv.UPGRADE_PROCEED] = self.dialog.queryString(
            name=ohostedcons.Confirms.UPGRADE_PROCEED,
            note=ohostedutil.readmeFileContent(
                ohostedcons.FileLocations.README_APPLIANCE) +
            _('Continuing will upgrade the engine VM running on this '
              'hosts deploying and configuring '
              'a new appliance.\n'
              'If your engine VM is already based on el7 you can also '
              'simply upgrade the engine there.\n'
              'This procedure will create a new disk on the '
              'hosted-engine storage domain and it will backup '
              'there the content of your current engine VM disk.\n'
              'The new el7 based appliance will be deployed over the '
              'existing disk destroying its content; '
              'at any time you will be able to rollback using the '
              'content of the backup disk.\n'
              'You will be asked to take a backup of the running engine '
              'and copy it to this host.\n'
              'The engine backup will be automatically injected '
              'and recovered on the new appliance.\n'
              'Are you sure you want to continue? '
              '(@VALUES@)[@DEFAULT@]: '),
            # TODO: point to our site for troubleshooting info...
            prompt=True,
            validValues=(_('Yes'), _('No')),
            caseSensitive=False,
            default=_('Yes')) == _('Yes').lower()
    if not self.environment[ohostedcons.CoreEnv.UPGRADE_PROCEED]:
        raise otopicontext.Abort('Aborted by user')
    self.environment[ohostedcons.CoreEnv.UPGRADING_APPLIANCE] = True
    self.environment[
        ohostedcons.VDSMEnv.VDS_CLI] = ohautil.connect_vdsm_json_rpc(
        logger=self.logger,
        timeout=ohostedcons.Const.VDSCLI_SSL_TIMEOUT,
    )
    self.environment.setdefault(
        ohostedcons.CoreEnv.REQUIREMENTS_CHECK_ENABLED,
        True)
    try:
        # avoid: pyflakes 'Config' imported but unused error
        import ovirt.node.utils.fs
        if hasattr(ovirt.node.utils.fs, 'Config'):
            # Running on ovirt-node: enable persisting file configuration.
            self.environment[ohostedcons.CoreEnv.NODE_SETUP] = True
    except ImportError:
        self.logger.debug('Disabling persisting file configuration')
def scan(self):
    """
    Scan the storage domain looking for the OVF_STORE volume.

    On success self._ovf_store_imgUUID / self._ovf_store_volUUID are
    populated with the matching image/volume pair.

    :returns: True if the OVF_STORE was found, False otherwise
    :raises RuntimeError: on VDSM errors while listing volumes
    """
    self._ovf_store_imgUUID = None
    self._ovf_store_volUUID = None
    _cli = util.connect_vdsm_json_rpc(
        logger=self._log,
        timeout=constants.VDSCLI_SSL_TIMEOUT
    )
    imgs = image.Image(self._type, self._sdUUID)
    imageslist = imgs.get_images_list(_cli)
    for img_uuid in imageslist:
        volumeslist = _cli.getVolumesList(
            imageID=img_uuid,
            storagepoolID=self._spUUID,
            storagedomainID=self._sdUUID,
        )
        self._log.debug(volumeslist)
        if volumeslist['status']['code'] != 0:
            raise RuntimeError(volumeslist['status']['message'])
        for vol_uuid in volumeslist['items']:
            volumeinfo = _cli.getVolumeInfo(
                volumeID=vol_uuid,
                imageID=img_uuid,
                storagepoolID=self._spUUID,
                storagedomainID=self._sdUUID,
            )
            self._log.debug(volumeinfo)
            if volumeinfo['status']['code'] != 0:
                raise RuntimeError(volumeinfo['status']['message'])
            description = volumeinfo['description']
            # The OVF_STORE volume carries a JSON description object.
            if (
                'Disk Description' in description and
                description[0] == '{' and
                description[-1] == '}'
            ):
                description_dict = json.loads(description)
                self._log.debug(description_dict)
                if description_dict['Disk Description'] == 'OVF_STORE':
                    self._ovf_store_imgUUID = img_uuid
                    self._ovf_store_volUUID = vol_uuid
                    self._log.info(
                        'Found OVF_STORE: '
                        'imgUUID:{img}, volUUID:{vol}'.format(
                            img=self._ovf_store_imgUUID,
                            vol=self._ovf_store_volUUID,
                        )
                    )
    # BUG FIX: the original tested _ovf_store_imgUUID twice and never
    # checked _ovf_store_volUUID; verify both are set.
    if self._ovf_store_imgUUID is None or self._ovf_store_volUUID is None:
        self._log.warning('Unable to find OVF_STORE')
        return False
    return True
def set_mode(self, mode):
    """
    Set the HA maintenance mode.

    :param mode: one of 'local', 'global' or 'none'
    :returns: True on success, False otherwise
    """
    ha_cli = client.HAClient()
    if mode not in (
            'local',
            'global',
            'none',
    ):
        sys.stderr.write(
            _('Invalid maintenance mode: {0}\n').format(mode)
        )
        return False
    m_local = (mode == 'local')
    m_global = (mode == 'global')
    if m_local:
        # Refuse local maintenance while the engine VM runs here.
        vm_id = config.Config().get(config.ENGINE, const.HEVMID)
        cli = ohautil.connect_vdsm_json_rpc()
        try:
            vm_list = cli.Host.getVMList()
        except ServerError as e:
            sys.stderr.write(
                _("Failed communicating with VDSM: {e}").format(e=e)
            )
            return False
        # BUG FIX: Host.getVMList returns VM status dicts (see the
        # sibling implementation of set_mode in this file), so compare
        # against their 'vmId' field instead of the raw list items.
        if vm_id in [item['vmId'] for item in vm_list]:
            sys.stderr.write(_(
                "Unable to enter local maintenance mode: "
                "the engine VM is running on the current host, "
                "please migrate it before entering local "
                "maintenance mode.\n"
            ))
            return False
    try:
        ha_cli.set_maintenance_mode(
            mode=ha_cli.MaintenanceMode.LOCAL,
            value=m_local,
        )
        ha_cli.set_maintenance_mode(
            mode=ha_cli.MaintenanceMode.GLOBAL,
            value=m_global,
        )
        ha_cli.set_maintenance_mode(
            mode=ha_cli.MaintenanceMode.LOCAL_MANUAL,
            value=m_local,
        )
    except socket.error:
        sys.stderr.write(
            _('Cannot connect to the HA daemon, please check the logs.\n')
        )
        return False
    return True
def set_mode(self, mode):
    """
    Switch HA maintenance mode to 'local', 'global' or 'none'.

    :returns: True on success, False on any error
    """
    ha_cli = client.HAClient()
    if mode not in ('local', 'global', 'none'):
        sys.stderr.write(
            _('Invalid maintenance mode: {0}\n').format(mode)
        )
        return False
    m_local = mode == 'local'
    m_global = mode == 'global'
    if m_local:
        # The engine VM must not be running here before entering
        # local maintenance.
        vm_id = config.Config().get(config.ENGINE, const.HEVMID)
        cli = ohautil.connect_vdsm_json_rpc()
        try:
            vm_list = cli.Host.getVMList()
        except ServerError as e:
            sys.stderr.write(
                _("Failed communicating with VDSM: {e}").format(e=e)
            )
            return False
        running_ids = [item['vmId'] for item in vm_list]
        if vm_id in running_ids:
            sys.stderr.write(_(
                "Unable to enter local maintenance mode: "
                "the engine VM is running on the current host, "
                "please migrate it before entering local "
                "maintenance mode.\n"
            ))
            return False
    try:
        for ha_mode, value in (
            (ha_cli.MaintenanceMode.LOCAL, m_local),
            (ha_cli.MaintenanceMode.GLOBAL, m_global),
            (ha_cli.MaintenanceMode.LOCAL_MANUAL, m_local),
        ):
            ha_cli.set_maintenance_mode(mode=ha_mode, value=value)
    except socket.error:
        sys.stderr.write(
            _('Cannot connect to the HA daemon, please check the logs.\n')
        )
        return False
    return True
def action(self, options):
    """Publish the host's free memory as reported by VDSM (as a string)."""
    cli = util.connect_vdsm_json_rpc(logger=self._log)
    try:
        stats = cli.Host.getStats()
    except ServerError as e:
        self._log.error(e)
        self.update_result(None)
        return
    mem_free = str(stats['memFree'])
    self._log.info("memFree: %s", mem_free,
                   extra=log_filter.lf_args('status', 60))
    self.update_result(mem_free)
def create(args):
    """Create the engine VM from a vm.conf file, exiting non-zero on failure."""
    vm_params = vmconf.parseVmConfFile(args.filename)
    # Prefer the raw libvirt xml when the vm.conf provides one.
    xml = vm_params.get('xml')
    cli = ohautil.connect_vdsm_json_rpc()
    try:
        response = cli.VM.create(
            vmID=vm_params['vmId'],
            vmParams={'xml': xml} if xml is not None else vm_params,
        )
    except ServerError as e:
        sys.stderr.write(str(e) + '\n')
        sys.exit(1)
    if response['status'] != "WaitForLaunch":
        sys.stderr.write('VM failed to launch in the create function\n')
        sys.exit(1)
def action(self, options):
    """Check that the monitored storage domain is valid and responsive."""
    try:
        cli = util.connect_vdsm_json_rpc(logger=self._log)
        status = cli.Host.getStorageRepoStats(domains=[self.sd_uuid])
    except ServerError as e:
        self._log.error(str(e))
        self.update_result(False)
        return
    try:
        repo_stats = status[self.sd_uuid]
        valid = repo_stats['valid']
        delay = float(repo_stats['delay'])
        if valid and delay <= constants.STORAGE_DELAY:
            self.update_result(True)
            return
    except Exception:
        self._log.warn("Hosted-engine storage domain is in invalid state")
    self.update_result(False)
def action(self, options):
    """Compute the host memory usage ratio and publish it as a string."""
    cli = util.connect_vdsm_json_rpc(logger=self._log)
    try:
        stats = cli.Host.getStats()
        caps = cli.Host.getCapabilities()
    except ServerError as e:
        self._log.error(e)
        self.update_result(None)
        return
    total = int(caps['memSize'])
    used = int(stats['memUsed'])
    ratio = used / float(total)
    self._log.info("memSize: %d, memUsed: %d, Load: %f",
                   total, used, ratio,
                   extra=log_filter.lf_args('status', 60))
    self.update_result(str(ratio))
def get_domain_path(config_):
    """
    Return the local mount path of the storage domain holding the
    engine vm.

    :param config_: configuration object to read SD_UUID / DOMAIN_TYPE from
    :raises Exception: when the domain path cannot be located
    """
    cli = util.connect_vdsm_json_rpc(
        logger=None,
        timeout=constants.VDSCLI_SSL_TIMEOUT
    )
    sd_uuid = config_.get(config.ENGINE, config_constants.SD_UUID)
    dom_type = config_.get(config.ENGINE, config_constants.DOMAIN_TYPE)
    parent = constants.SD_MOUNT_PARENT
    file_based = dom_type in (
        constants.DOMAIN_TYPE_NFS,
        constants.DOMAIN_TYPE_NFS3,
        constants.DOMAIN_TYPE_NFS4,
        constants.DOMAIN_TYPE_GLUSTERFS,
    )
    if file_based:
        try:
            info = cli.StorageDomain.getInfo(storagedomainID=sd_uuid)
            path = canonize_file_path(dom_type, info['remotePath'], sd_uuid)
            if os.access(path, os.F_OK):
                return path
        except (ServerError, KeyError):
            # don't have remotePath? so fallback to the scan below
            pass
    # Fallback in case the getStorageDomainInfo call fails.
    # NOTE: this scan can get stuck when a storage domain is not
    # accessible, rhbz#1140824
    if dom_type == constants.DOMAIN_TYPE_GLUSTERFS:
        parent = os.path.join(parent, constants.SD_GLUSTER_PREFIX)
    for entry in os.listdir(parent):
        candidate = os.path.join(parent, entry, sd_uuid)
        if os.access(candidate, os.F_OK):
            return candidate
    raise Exception("path to storage domain {0} not found in {1}"
                    .format(sd_uuid, parent))
def action(self, options):
    """Query VDSM for engine VM stats and publish the resulting status."""
    cli = util.connect_vdsm_json_rpc(logger=self._log)
    # Timestamp taken before the RPC call so that any later event
    # carrying a status change is guaranteed to be newer.
    local_ts = monotonic.time()
    try:
        stats = cli.VM.getStats(vmID=self._vm_uuid)[0]
    except ServerError as e:
        if e.code == vdsm_exception.NoSuchVM.code:
            self._log.info("VM not on this host",
                           extra=log_filter.lf_args('status', 60))
            if self._vm_state == engine.VMState.UP:
                self._vm_state = engine.VMState.DOWN_MISSING
            result = {
                'vm': self._vm_state,
                'health': engine.Health.BAD,
                'detail': 'unknown',
                'reason': 'vm not running on this host',
            }
        else:
            self._log.error(e)
            result = {
                'vm': 'unknown',
                'health': 'unknown',
                'detail': 'unknown',
                'reason': 'failed to getVmStats',
            }
        with self._lock:
            self._stats_local_timestamp = local_ts
            self._stats_vdsm_timestamp = None
        self.update_result(json.dumps(result))
        return
    # statusTime may come back as an int; keep it as a string.
    vdsm_ts = str(stats.get("statusTime"))
    self._update_stats(stats, vdsm_ts, local_ts)
def create(args):
    """Launch the engine VM defined by the given vm.conf file."""
    vm_params = vmconf.parseVmConfFile(args.filename)
    xml = vm_params.get('xml')
    if xml is not None:
        # A libvirt domain xml wins over the legacy parameter dict.
        payload = {'xml': xml}
    else:
        payload = vm_params
    cli = ohautil.connect_vdsm_json_rpc()
    try:
        response = cli.VM.create(vmID=vm_params['vmId'], vmParams=payload)
        if response['status'] != "WaitForLaunch":
            sys.stderr.write('VM failed to launch in the create function\n')
            sys.exit(1)
    except ServerError as e:
        sys.stderr.write(str(e) + '\n')
        sys.exit(1)
def _boot_from_install_media(self):
    """Create the VM and poll until the OS installation is reported done."""
    os_installed = False
    self._create_vm()
    while not os_installed:
        try:
            os_installed = check_liveliness.manualSetupDispatcher(
                self,
                check_liveliness.MSD_OS_INSTALLED,
            )
        except socket.error as e:
            # VDSM connection dropped: log, reconnect, then retry.
            self.logger.debug(
                'Error talking with VDSM (%s), reconnecting.' % str(e),
                exc_info=True
            )
            self.environment[
                ohostedcons.VDSMEnv.VDS_CLI
            ] = ohautil.connect_vdsm_json_rpc(
                logger=self.logger,
                timeout=ohostedcons.Const.VDSCLI_SSL_TIMEOUT,
            )
def _boot_from_hd(self):
    """Start the engine VM from disk, reconnecting to VDSM on socket errors."""
    # Temporarily attach the cloud-init no-cloud iso when configured.
    cdrom = self.environment[ohostedcons.VMEnv.CDROM]
    if cdrom:
        self.environment[ohostedcons.VMEnv.SUBST]['@CDROM@'] = cdrom
    created = False
    while not created:
        try:
            self._create_vm()
            created = True
        except socket.error as e:
            self.logger.debug(
                'Error talking with VDSM (%s), reconnecting.' % str(e),
                exc_info=True)
            self.environment[
                ohostedcons.VDSMEnv.VDS_CLI
            ] = ohautil.connect_vdsm_json_rpc(
                logger=self.logger,
                timeout=ohostedcons.Const.VDSCLI_SSL_TIMEOUT,
            )
def action(self, options):
    """Report availability of the hosted-engine storage domain."""
    try:
        cli = util.connect_vdsm_json_rpc(logger=self._log)
        status = cli.Host.getStorageRepoStats(domains=[self.sd_uuid])
    except ServerError as e:
        self._log.error(str(e))
        self.update_result(False)
        return
    result = False
    try:
        domain = status[self.sd_uuid]
        valid = domain['valid']
        delay = float(domain['delay'])
        result = bool(valid and delay <= constants.STORAGE_DELAY)
    except Exception:
        self._log.warn("Hosted-engine storage domain is in invalid state")
    self.update_result(result)
def _late_setup(self):
    # Customization checks require a running vdsmd.
    service_name = self.environment[ohostedcons.VDSMEnv.VDSMD_SERVICE]
    if not self.services.status(name=service_name):
        rc, _stdout, _stderr = self.execute(
            (
                self.command.get('vdsm-tool'),
                'configure',
                '--force',
            ),
            raiseOnError=False,
        )
        if rc != 0:
            raise RuntimeError(
                _(
                    'Failed to reconfigure libvirt for VDSM'
                )
            )
        if not self.services.supportsDependency:
            # Init systems without dependency support need the
            # prerequisite services brought up by hand.
            if self.services.exists('cgconfig'):
                self.services.state('cgconfig', True)
            if self.services.exists('messagebus'):
                self.services.state('messagebus', True)
            if self.services.exists('libvirtd'):
                self.services.state('libvirtd', True)
        self.services.state(name=service_name, state=True)
    self.environment[
        ohostedcons.VDSMEnv.VDS_CLI
    ] = ohautil.connect_vdsm_json_rpc(
        logger=self.logger,
        timeout=ohostedcons.Const.VDSCLI_SSL_TIMEOUT,
    )
def validate_storage_server(self):
    """
    Check the hosted-engine storage domain availability.

    :return: True if available, False otherwise
    """
    self._log.info("Validating storage server")
    cli = util.connect_vdsm_json_rpc(
        logger=self._log,
        timeout=constants.VDSCLI_SSL_TIMEOUT,
    )
    try:
        repo_stats = cli.Host.getStorageRepoStats(
            domains=[self._sdUUID],
        )
    except ServerError as e:
        self._log.error(str(e))
        return False
    try:
        domain = repo_stats[self._sdUUID]
        valid = domain['valid']
        delay = float(domain['delay'])
        if valid and delay <= constants.LOOP_DELAY:
            return True
    except Exception:
        self._log.warn("Hosted-engine storage domain is in invalid state")
    return False
def setVmTicket(args):
    """
    Set a VNC ticket on the engine VM, wait for the VNC port to be
    allocated and print the connection endpoint.

    Exits with status 1 if no VNC port shows up within VNC_PORT_TIMEOUT.
    """
    cli = ohautil.connect_vdsm_json_rpc()
    # TODO: handle also spice
    cli.VM.updateDevice(vmID=args.vmid, params={
        'deviceType': 'graphics',
        'existingConnAction': 'keep',
        'graphicsType': 'vnc',
        'params': {},
        'ttl': args.ttl,
        'password': args.password,
    })
    vnc_port = None
    delay = 1
    t = VNC_PORT_TIMEOUT
    vnc = []
    while not vnc_port and t > 0:
        t = t - delay
        vmstats = cli.VM.getStats(vmID=args.vmid)
        displayinfo = vmstats[0]['displayInfo']
        vnc = [x for x in displayinfo if x['type'] == 'vnc']
        if vnc:
            try:
                vnc_p_i = int(vnc[0]['port'])
            except ValueError:
                vnc_p_i = 0
            if vnc_p_i > 0:
                vnc_port = vnc[0]['port']
        if not vnc_port:
            # BUG FIX: sleep one polling interval between retries; the
            # original slept for the whole remaining budget (time.sleep(t)),
            # so the total wait grew far beyond VNC_PORT_TIMEOUT.
            time.sleep(delay)
    if vnc_port:
        print(("You can now connect the hosted-engine VM with VNC at "
               "{ip}:{port}").format(
            ip=vnc[0]['ipAddress'],
            port=vnc_port,
        ))
    else:
        sys.stderr.write('Failed detecting VNC port\n')
        sys.exit(1)
def action(self, options):
    # First check whether vdsm reports the VM as up.
    cli = util.connect_vdsm_json_rpc(
        logger=self._log
    )
    # Take the timestamp before the RPC call so any future event with
    # a status change carries a newer timestamp.
    local_ts = monotonic.time()
    try:
        stats = cli.VM.getStats(vmID=self._vm_uuid)[0]
    except ServerError as e:
        no_such_vm = e.code == vdsm_exception.NoSuchVM.code
        if no_such_vm:
            self._log.info("VM not on this host",
                           extra=log_filter.lf_args('status', 60))
            if self._vm_state == engine.VMState.UP:
                self._vm_state = engine.VMState.DOWN_MISSING
            d = {'vm': self._vm_state,
                 'health': engine.Health.BAD,
                 'detail': 'unknown',
                 'reason': 'vm not running on this host'}
        else:
            self._log.error(e)
            d = {'vm': 'unknown',
                 'health': 'unknown',
                 'detail': 'unknown',
                 'reason': 'failed to getVmStats'}
        with self._lock:
            self._stats_local_timestamp = local_ts
            self._stats_vdsm_timestamp = None
        self.update_result(json.dumps(d))
        return
    # statusTime may be an int; normalize to str.
    vdsm_ts = str(stats.get("statusTime"))
    self._update_stats(stats, vdsm_ts, local_ts)
def _misc(self):
    """Reconfigure VDSM with vdsm-tool and restart vdsmd to apply it."""
    vdsmd = self.environment[ohostedcons.VDSMEnv.VDSMD_SERVICE]
    self.logger.info(_('Starting {service}').format(service=vdsmd))
    self.services.startup(name=vdsmd, state=True)
    # A stop/configure/start cycle is needed to reload the configuration.
    self.services.state(name=vdsmd, state=False)
    rc, _stdout, _stderr = self.execute(
        (
            self.command.get('vdsm-tool'),
            'configure',
            '--force',
        ),
        raiseOnError=False,
    )
    if rc != 0:
        raise RuntimeError(_('Failed to reconfigure libvirt for VDSM'))
    if not self.services.supportsDependency:
        if self.services.exists('cgconfig'):
            self.services.state('cgconfig', True)
        if self.services.exists('messagebus'):
            self.services.state('messagebus', True)
        if self.services.exists('libvirtd'):
            self.services.state('libvirtd', True)
    self.services.state(name=vdsmd, state=True)
    self.environment[ohostedcons.VDSMEnv.VDS_CLI] = (
        ohautil.connect_vdsm_json_rpc(
            logger=self.logger,
            timeout=ohostedcons.Const.VDSCLI_SSL_TIMEOUT,
        )
    )
def get_domain_path(config_):
    """
    Return the path of the storage domain holding the engine vm.

    :raises Exception: if the domain path cannot be found
    """
    cli = util.connect_vdsm_json_rpc(logger=None,
                                     timeout=constants.VDSCLI_SSL_TIMEOUT)
    sd_uuid = config_.get(config.ENGINE, config_constants.SD_UUID)
    dom_type = config_.get(config.ENGINE, config_constants.DOMAIN_TYPE)
    parent = constants.SD_MOUNT_PARENT
    if dom_type in (
        constants.DOMAIN_TYPE_NFS,
        constants.DOMAIN_TYPE_NFS3,
        constants.DOMAIN_TYPE_NFS4,
        constants.DOMAIN_TYPE_GLUSTERFS,
    ):
        try:
            response = cli.StorageDomain.getInfo(storagedomainID=sd_uuid)
            path = canonize_file_path(dom_type,
                                      response['remotePath'],
                                      sd_uuid)
            if os.access(path, os.F_OK):
                return path
        except (ServerError, KeyError):
            # no remotePath in the answer: fall through to the scan below
            pass
    # Fallback when getStorageDomainInfo fails. Beware: this scan can
    # get stuck when a storage domain is unreachable, rhbz#1140824
    if dom_type == constants.DOMAIN_TYPE_GLUSTERFS:
        parent = os.path.join(parent, constants.SD_GLUSTER_PREFIX)
    for dname in os.listdir(parent):
        path = os.path.join(parent, dname, sd_uuid)
        if os.access(path, os.F_OK):
            return path
    raise Exception("path to storage domain {0} not found in {1}".format(
        sd_uuid, parent))
def get_volume_path(sp_uuid, sd_uuid, img_uuid, vol_uuid):
    """
    Return the path of the volume file inside the domain.

    :param sp_uuid: StoragePool UUID
    :param sd_uuid: StorageDomain UUID
    :param img_uuid: Image UUID
    :param vol_uuid: Volume UUID
    :returns: The local path of the required volume
    :raises RuntimeError: when VDSM cannot prepare the image
    """
    local_path = os.path.join(
        envconst.SD_RUN_DIR,
        sd_uuid,
        img_uuid,
        vol_uuid,
    )
    if os.path.exists(local_path):
        return local_path
    try:
        cli = connect_vdsm_json_rpc()
        prepared = cli.Image.prepare(
            storagepoolID=sp_uuid,
            storagedomainID=sd_uuid,
            imageID=img_uuid,
            volumeID=vol_uuid,
        )
        return prepared
    except ServerError as e:
        raise RuntimeError(
            'Path to volume {vol_uuid} not found in {root}.'
            ' Caused by: {err}'.format(
                vol_uuid=vol_uuid,
                root=envconst.SD_RUN_DIR,
                err=e,
            )
        )
def connect_storage_server(self):
    """
    Connect the hosted-engine domain storage server
    """
    self._log.info("Connecting storage server")
    cli = util.connect_vdsm_json_rpc(
        logger=self._log,
        timeout=constants.VDSCLI_SSL_TIMEOUT,
    )
    con_list, storage_type = self._get_conlist(cli, normalize_path=True)
    if con_list:
        self._log.info("Connecting storage server")
        result = cli.connectStorageServer(
            storagepoolID=self._spUUID,
            domainType=storage_type,
            connectionParams=con_list,
        )
        self._check_connection(result)
    self._log.info("Refreshing the storage domain")
    # calling getStorageDomainStats has the side effect of
    # causing a Storage Domain refresh including
    # all its tree under /rhev/data-center/...
    cli.getStorageDomainStats(self._sdUUID)
def calculate_load(self):
    """
    Compute the system CPU load over the sampling interval, subtract
    the engine VM's own CPU usage, and store the remainder in self.load.
    """
    delta_total = self.system['cur'].total - self.system['prev'].total
    delta_busy = self.system['cur'].busy - self.system['prev'].busy
    load = delta_busy / float(delta_total)

    cli = util.connect_vdsm_json_rpc(logger=self._log)
    engine_load = 0.0
    try:
        stats = cli.VM.getStats(vmID=self._vm_uuid)[0]
        vm_cpu = float(stats["cpuUser"]) + float(stats["cpuSys"])
        # Per-VM cpu usage is reported in percent per core; normalize
        # against the host core count to get a 0..1 load fraction.
        engine_load = (vm_cpu / multiprocessing.cpu_count()) / 100.0
    except ServerError as e:
        if e.code == vdsm_exception.NoSuchVM.code:
            self._log.info("VM not on this host",
                           extra=log_filter.lf_args('vm', 60))
        else:
            self._log.error(e, extra=log_filter.lf_args('vm', 60))
    except KeyError:
        self._log.info(
            "VM stats do not contain cpu usage. VM might be down.",
            extra=log_filter.lf_args('vm', 60)
        )
    except ValueError as e:
        self._log.error("Error getting cpuUser: %s", str(e))

    load_no_engine = max(load - engine_load, 0.0)
    self._log.info(
        "System load"
        " total={0:.4f}, engine={1:.4f}, non-engine={2:.4f}".format(
            load, engine_load, load_no_engine))
    self.load = load_no_engine
def validate_storage_server(self):
    """
    Checks the hosted-engine storage domain availability

    :return: True if available, False otherwise
    """
    self._log.info("Validating storage server")
    cli = util.connect_vdsm_json_rpc(
        logger=self._log,
        timeout=constants.VDSCLI_SSL_TIMEOUT,
    )
    try:
        repo_stats = cli.Host.getStorageRepoStats(domains=[self._sdUUID])
    except ServerError as e:
        self._log.error(str(e))
        return False
    try:
        domain_stats = repo_stats[self._sdUUID]
        valid = domain_stats['valid']
        delay = float(domain_stats['delay'])
        # The domain counts as available only when it is valid AND
        # responsive enough (delay within one monitoring loop).
        if valid and delay <= constants.LOOP_DELAY:
            return True
    except Exception:
        # Missing or malformed stats for our domain: treat as invalid.
        self._log.warn("Hosted-engine storage domain is in invalid state")
    return False
def calculate_load(self):
    """
    Compute the system CPU load over the sampling interval, subtract
    the engine VM's own CPU usage, and store the remainder in self.load.

    The result is only committed when vdsm reports real libvirt CPU data
    (see the cpuUsage hack below) or the VM is not on this host;
    otherwise the update is deferred for up to 5 minutes before stale
    init values are accepted anyway.
    """
    dtotal = self.system['cur'].total - self.system['prev'].total
    dbusy = self.system['cur'].busy - self.system['prev'].busy
    load = dbusy / float(dtotal)
    cli = util.connect_vdsm_json_rpc(logger=self._log)
    engine_load = 0.0
    cpu_data_is_real = False
    vm_on_this_host = False
    try:
        stats = cli.VM.getStats(vmID=self._vm_uuid)[0]
        vm_on_this_host = True
        vm_cpu_total = float(stats["cpuUser"]) + float(stats["cpuSys"])
        cpu_count = multiprocessing.cpu_count()
        engine_load = (vm_cpu_total / cpu_count) / 100.0
        # This is a hack. vdsm initializes cpuUsage to 0.00, and when it
        # gets a result from libvirt (as 'cpu.user', 'cpu.system'), sets
        # it to libvirt's value. cpuUser and cpuSystem are also initialized
        # to '0.00', but can also have '0.00' as a legit value afterwards.
        # But cpuUsage, if it has a value from libvirt, is always an
        # integer. Actually, AFAICT, initializing it to '0.00' might be
        # considered a bug. Anyway, rely on this for deciding whether
        # cpuUser/cpuSystem are real or init values.
        # TODO: Extend VDSM's API to include this information explicitly,
        # e.g. by adding a new field, say 'stats_from_libvirt' which is
        # True or False, and base the decision on this.
        cpu_data_is_real = stats['cpuUsage'] != '0.00'
    except ServerError as e:
        if e.code == vdsm_exception.NoSuchVM.code:
            self._log.info("VM not on this host",
                           extra=log_filter.lf_args('vm', 60))
            self.latest_real_stats_ts = None
        else:
            self._log.error(e, extra=log_filter.lf_args('vm', 60))
    except KeyError:
        # FIX: this log message was corrupted by a raw newline inside
        # the string literal; restored to a single line.
        self._log.info(
            "VM stats do not contain cpu usage. VM might be down.",
            extra=log_filter.lf_args('vm', 60))
    except ValueError as e:
        self._log.error("Error getting cpuUser: %s", str(e))
    load_no_engine = load - engine_load
    load_no_engine = max(load_no_engine, 0.0)
    if cpu_data_is_real or not vm_on_this_host:
        self._log.info(
            "System load"
            " total={0:.4f}, engine={1:.4f}, non-engine={2:.4f}".format(
                load, engine_load, load_no_engine))
        self.load = load_no_engine
        self.latest_real_stats_ts = time.time()
    else:
        # In certain cases, we got cpuUser=0.00 for up to around
        # 90 seconds after a VM was up, causing what seems like
        # a "general" high cpu load unrelated to that VM.
        # This caused problems with hosted-engine HA daemons,
        # which lower the score of that host due to that load.
        # Rely on cpuUsage value instead. See also:
        # https://lists.ovirt.org/archives/list/[email protected]/thread/\
        # 7HNIFCW4NENG4ADZ5ROT43TCDXDURRJB/
        if self.latest_real_stats_ts is None:
            # Just ignore, but start counting
            self.latest_real_stats_ts = time.time()
        elif not util.has_elapsed(self.latest_real_stats_ts, 300):
            self._log.info("Ignoring cpuUser/cpuSys, init values")
        else:
            # No real data, and for more than 5 minutes.
            # It's probably bad enough that we should just
            # not ignore - so if cpu load is high, just report
            # that, and if as a result the score will be low
            # and the VM will be shut down - so be it.
            self._log.info(
                "System load"
                " total={0:.4f}, engine={1:.4f}, non-engine={2:.4f}".
                format(load, engine_load, load_no_engine))
            self._log.info("engine VM cpu usage is not up-to-date")
            self.load = load_no_engine
def scan(self):
    """
    Scan every image/volume on the hosted-engine storage domain looking
    for the OVF_STORE disk; when found, prepare it and cache its path
    on the class.

    :return: True when an OVF_STORE was found, False otherwise
    :raises RuntimeError: on VDSM errors while listing or preparing
    """
    self.clear_store_info()
    cli = util.connect_vdsm_json_rpc(
        logger=self._log,
        timeout=constants.VDSCLI_SSL_TIMEOUT,
    )
    imgs = image.Image(self._type, self._sdUUID)
    for img_uuid in imgs.get_images_list(cli):
        try:
            volumeslist = cli.StorageDomain.getVolumes(
                imageID=img_uuid,
                storagepoolID=self._spUUID,
                storagedomainID=self._sdUUID,
            )
            self._log.debug(volumeslist)
        except ServerError as e:
            raise RuntimeError(str(e))
        for vol_uuid in volumeslist:
            try:
                volumeinfo = cli.Volume.getInfo(
                    volumeID=vol_uuid,
                    imageID=img_uuid,
                    storagepoolID=self._spUUID,
                    storagedomainID=self._sdUUID,
                )
                self._log.debug(volumeinfo)
            except ServerError as e:
                raise RuntimeError(str(e))
            description = volumeinfo['description']
            # Engine-created disk descriptions are JSON objects.
            if ('Disk Description' in description and
                    description.startswith('{') and
                    description.endswith('}')):
                description_dict = json.loads(description)
                self._log.debug(description_dict)
                if description_dict['Disk Description'] == 'OVF_STORE':
                    self._log.info('Found OVF_STORE: '
                                   'imgUUID:{img}, volUUID:{vol}'.format(
                                       img=img_uuid,
                                       vol=vol_uuid,
                                   ))
                    # Prepare symlinks for the OVF store
                    try:
                        image_info = cli.Image.prepare(
                            storagepoolID=self._spUUID,
                            storagedomainID=self._sdUUID,
                            imageID=img_uuid,
                            volumeID=vol_uuid)
                        # Cached on the class so it is shared by all
                        # OVFStore instances.
                        OVFStore._ovf_store_path = image_info["path"]
                    except ServerError as e:
                        raise RuntimeError(str(e))
    if self._ovf_store_path is None:
        self._log.warning('Unable to find OVF_STORE',
                          extra=log_filter.lf_args(LF_OVF_STORE_NOT_FOUND,
                                                   LF_OVF_LOG_DELAY))
        return False
    return True
def destroy(args):
    """Forcibly destroy the VM identified by args.vmid through VDSM."""
    ohautil.connect_vdsm_json_rpc().VM.destroy(vmID=args.vmid)
def checkVmStatus(args):
    """Query VDSM for the VM identified by args.vmid and print its status."""
    cli = ohautil.connect_vdsm_json_rpc()
    stats = cli.VM.getStats(vmID=args.vmid)[0]
    print(stats['status'])
def scan(self):
    """
    Look through all images/volumes on the hosted-engine storage domain
    for the OVF_STORE disk; on success prepare it and cache its path on
    the class.

    :return: True when the OVF_STORE was located, False otherwise
    :raises RuntimeError: on VDSM errors while listing or preparing
    """
    self.clear_store_info()
    cli = util.connect_vdsm_json_rpc(
        logger=self._log,
        timeout=constants.VDSCLI_SSL_TIMEOUT,
    )
    imgs = image.Image(self._type, self._sdUUID)
    for img_uuid in imgs.get_images_list(cli):
        try:
            vol_uuids = cli.StorageDomain.getVolumes(
                imageID=img_uuid,
                storagepoolID=self._spUUID,
                storagedomainID=self._sdUUID,
            )
            self._log.debug(vol_uuids)
        except ServerError as e:
            raise RuntimeError(str(e))
        for vol_uuid in vol_uuids:
            try:
                vol_info = cli.Volume.getInfo(
                    volumeID=vol_uuid,
                    imageID=img_uuid,
                    storagepoolID=self._spUUID,
                    storagedomainID=self._sdUUID,
                )
                self._log.debug(vol_info)
            except ServerError as e:
                raise RuntimeError(str(e))
            desc = vol_info['description']
            # Only engine-created JSON disk descriptions are candidates.
            if not ('Disk Description' in desc and
                    desc.startswith('{') and desc.endswith('}')):
                continue
            desc_dict = json.loads(desc)
            self._log.debug(desc_dict)
            if desc_dict['Disk Description'] != 'OVF_STORE':
                continue
            self._log.info(
                'Found OVF_STORE: '
                'imgUUID:{img}, volUUID:{vol}'.format(
                    img=img_uuid,
                    vol=vol_uuid,
                )
            )
            # Prepare symlinks for the OVF store
            try:
                image_info = cli.Image.prepare(
                    storagepoolID=self._spUUID,
                    storagedomainID=self._sdUUID,
                    imageID=img_uuid,
                    volumeID=vol_uuid,
                )
                # Cached on the class, shared by all OVFStore instances.
                OVFStore._ovf_store_path = image_info["path"]
            except ServerError as e:
                raise RuntimeError(str(e))
    if self._ovf_store_path is None:
        self._log.warning('Unable to find OVF_STORE',
                          extra=log_filter.lf_args(
                              LF_OVF_STORE_NOT_FOUND,
                              LF_OVF_LOG_DELAY
                          ))
        return False
    return True
def connect_storage_server(self, timeout=constants.VDSCLI_SSL_TIMEOUT):
    """
    Connect the hosted-engine domain storage server

    :param timeout: seconds to wait for the VDSM json-rpc connection
    :raises RuntimeError: when no connection path could be activated
    """
    self._log.info("Connecting storage server")
    cli = util.connect_vdsm_json_rpc(
        logger=self._log,
        timeout=timeout,
    )
    conList, storageType = self._get_conlist(cli, normalize_path=True)
    if conList:
        self._log.info("Connecting storage server")
        try:
            connections = cli.StoragePool.connectStorageServer(
                storagepoolID=self._spUUID,
                domainType=storageType,
                connectionParams=conList,
            )
            self._log.debug(connections)
        except ServerError as e:
            raise RuntimeError('Connection to storage server failed: %s'
                               % str(e))

        connected = False
        failed_paths = []
        for con in connections:
            if con['status'] == 0:
                connected = True
                continue
            # Warn on individual dead paths only with a multipath setup.
            if len(connections) > 1:
                con_details = {}
                for ce in conList:
                    if con['id'] == ce['id']:
                        con_details = ce
                self._log.warning(
                    ('A connection path to the storage server is '
                     'not active, details: {con_details}').format(
                        con_details=con_details,
                    ))
                failed_paths.append(con_details)

        if not connected:
            raise RuntimeError('Connection to storage server failed')

        if (len(failed_paths) > 1 and
                storageType == constants.STORAGE_TYPE_ISCSI):
            bl_example = ','.join(
                fp['ifaceName'] + '<>' + fp['connection']
                for fp in failed_paths
                if 'ifaceName' in fp and 'connection' in fp
            )
            if bl_example:
                self._log.warning(
                    ('Many paths of your iSCSI multipath configurations '
                     'are failing, if it\'s by design you can blacklist '
                     'them setting "{k}={v}" in the hosted-engine '
                     'configuration.').format(
                        k=const.ISCSI_MPATHS_BLACKLIST,
                        v=bl_example,
                    ))

    self._log.info("Refreshing the storage domain")
    # calling getStorageDomainStats has the side effect of
    # causing a Storage Domain refresh including
    # all its tree under /rhev/data-center/...
    try:
        cli.StorageDomain.getStats(storagedomainID=self._sdUUID)
    except ServerError as e:
        self._log.debug("Error refreshing storage domain: %s", str(e))
def connect_storage_server(self, timeout=constants.VDSCLI_SSL_TIMEOUT):
    """
    Connect the hosted-engine domain storage server

    :param timeout: seconds to wait for the VDSM json-rpc connection
    :raises RuntimeError: when no connection path could be activated
    """
    self._log.info("Connecting storage server")
    cli = util.connect_vdsm_json_rpc(
        logger=self._log,
        timeout=timeout,
    )
    con_list, storage_type = self._get_conlist(cli, normalize_path=True)
    if con_list:
        self._log.info("Connecting storage server")
        try:
            connections = cli.StoragePool.connectStorageServer(
                storagepoolID=self._spUUID,
                domainType=storage_type,
                connectionParams=con_list,
            )
            self._log.debug(connections)
        except ServerError as e:
            raise RuntimeError(
                'Connection to storage server failed: %s' % str(e)
            )

        connected = False
        failed_paths = []
        multipath = len(connections) > 1
        for con in connections:
            if con['status'] == 0:
                connected = True
            elif multipath:
                # Match the failing connection back to its parameters
                # to give the admin something actionable in the log.
                con_details = {}
                for ce in con_list:
                    if con['id'] == ce['id']:
                        con_details = ce
                self._log.warning(
                    (
                        'A connection path to the storage server is '
                        'not active, details: {con_details}'
                    ).format(
                        con_details=con_details,
                    )
                )
                failed_paths.append(con_details)

        if not connected:
            raise RuntimeError(
                'Connection to storage server failed'
            )

        many_iscsi_failures = (
            len(failed_paths) > 1 and
            storage_type == constants.STORAGE_TYPE_ISCSI
        )
        if many_iscsi_failures:
            bl_example = ','.join([
                fp['ifaceName'] + '<>' + fp['connection']
                for fp in failed_paths
                if 'ifaceName' in fp and 'connection' in fp
            ])
            if bl_example:
                self._log.warning((
                    'Many paths of your iSCSI multipath configurations '
                    'are failing, if it\'s by design you can blacklist '
                    'them setting "{k}={v}" in the hosted-engine '
                    'configuration.'
                ).format(
                    k=const.ISCSI_MPATHS_BLACKLIST,
                    v=bl_example,
                ))

    self._log.info("Refreshing the storage domain")
    # calling getStorageDomainStats has the side effect of
    # causing a Storage Domain refresh including
    # all its tree under /rhev/data-center/...
    try:
        cli.StorageDomain.getStats(storagedomainID=self._sdUUID)
    except ServerError as e:
        self._log.debug("Error refreshing storage domain: %s", str(e))
def destroy(args):
    """Forcibly destroy the VM identified by args.vmid through VDSM."""
    cli = ohautil.connect_vdsm_json_rpc()
    cli.VM.destroy(vmID=args.vmid)
def checkVmStatus(args):
    """Query VDSM for the VM identified by args.vmid and print its status."""
    vm_stats = ohautil.connect_vdsm_json_rpc().VM.getStats(
        vmID=args.vmid
    )[0]
    print(vm_stats['status'])