def _check_sd_space(self):
    """Verify that the hosted-engine storage domain has enough free
    space to host a new disk for the new appliance.

    Raises RuntimeError if the storage domain cannot be found or if
    the free space is smaller than the configured image size.
    """
    engine_api = engineapi.get_engine_api(self)
    self.logger.debug('Successfully connected to the engine')
    sd_broker = engine_api.storagedomains.get(
        id=str(self.environment[ohostedcons.StorageEnv.SD_UUID])
    )
    if not sd_broker:
        raise RuntimeError(_(
            'Unable to find the hosted-engine storage domain in the engine'
        ))
    available = sd_broker.get_available()
    # fixed typo in the debug message ('availalbe') and reuse the value
    # already fetched instead of issuing a second API call
    self.logger.debug('available: {a}'.format(a=available))
    available_gib = available / 1024 / 1024 / 1024
    engine_api.disconnect()
    required_gib = int(
        self.environment[ohostedcons.StorageEnv.IMAGE_SIZE_GB]
    )
    if required_gib > available_gib:
        self.logger.error(
            _(
                'On the hosted-engine storage domain there is not enough '
                'available space to create a new disk for the new '
                'appliance: required {r}GiB - available {a}GiB. '
                'Please extend the hosted-engine storage domain.'
            ).format(
                r=required_gib,
                a=available_gib,
            )
        )
        raise RuntimeError(_(
            'Not enough free space on the hosted-engine storage domain'
        ))
def _check_upgrade_requirements(self):
    """Check that every datacenter and cluster visible to the engine is
    at a compatibility level supported by this upgrade flow.

    Raises RuntimeError on the first unsupported element.
    """
    self.logger.info('Checking version requirements')
    engine_api = engineapi.get_engine_api(self)
    self.logger.debug('Successfully connected to the engine')
    elements = engine_api.clusters.list() + engine_api.datacenters.list()
    for e in elements:
        if isinstance(e, brokers.DataCenter):
            element_t = 'datacenter'
        else:
            element_t = 'cluster'
        version = e.get_version()
        release = '{ma}.{mi}'.format(
            ma=version.major,
            mi=version.minor,
        )
        if release not in ohostedcons.Const.UPGRADE_SUPPORTED_VERSIONS:
            self.logger.error(
                _(
                    '{t} {name} is at version {release} which is not '
                    'supported by this upgrade flow. '
                    'Please fix it before upgrading.'
                ).format(
                    t=element_t.title(),
                    name=e.get_name(),
                    release=release,
                )
            )
            # translate the msgid first, then interpolate: calling
            # format() inside _() defeats the gettext catalog lookup
            raise RuntimeError(
                _('Unsupported {t} level').format(t=element_t)
            )
    self.logger.info(
        _('All the datacenters and clusters are at a compatible level')
    )
def _closeup(self):
    """Register the engine VM backup disk in the restored engine DB.

    The backup disk was created after the engine backup was taken, so
    the restored database does not know about it yet; look it up among
    the unregistered disks of the hosted-engine storage domain and
    import it.
    """
    engine_api = engineapi.get_engine_api(self)
    sd_broker = engine_api.storagedomains.get(
        id=str(self.environment[ohostedcons.StorageEnv.SD_UUID]))
    backup_img_uuid = self.environment[
        ohostedcons.Upgrade.BACKUP_IMG_UUID
    ]
    backup_disk = None
    for unreg_disk in sd_broker.disks.list(unregistered=True):
        disk_id = unreg_disk.get_id()
        self.logger.debug('unregistered disk: {id}'.format(id=disk_id))
        if disk_id == backup_img_uuid:
            self.logger.debug('found the engine VM backup disk')
            backup_disk = unreg_disk
    if backup_disk is None:
        raise RuntimeError(_('Unable to find the engine VM backup disk'))
    self.logger.info(
        _('Registering the hosted-engine backup disk in the DB'))
    registered_broker = sd_broker.disks.add(backup_disk, unregistered=True)
    if not self._wait_disk_ready(
        engine_api,
        registered_broker.get_id(),
        True,
    ):
        raise RuntimeError(
            _('Failed registering the engine VM backup disk'))
    engine_api.disconnect()
def _check_spm(self):
    """Ensure this tool is running on the SPM host.

    Looks up the local host in the engine by hardware UUID and checks
    its SPM status.  Raises RuntimeError if the host cannot be found
    or is not the SPM.
    """
    self.logger.info('Checking SPM status on this host')
    engine_api = engineapi.get_engine_api(self)
    self.logger.debug('Successfully connected to the engine')
    my_host_id = None
    my_host_uuid = self._get_host_uuid()
    for h in engine_api.hosts.list():
        if h.get_hardware_information().get_uuid() == my_host_uuid:
            my_host_id = h.get_id()
    if not my_host_id:
        # Fix: the original 'raise(_("..."))' raised a plain string,
        # which is a TypeError in Python 2.6+ instead of the intended
        # error; wrap it in RuntimeError like the rest of the file.
        raise RuntimeError(_(
            'Unable to find this host in the engine, '
            'please check the backup recovery'
        ))
    host_broker = engine_api.hosts.get(id=my_host_id)
    if host_broker.get_spm().get_status().state != 'spm':
        self.logger.error(
            _(
                'This host is not the SPM one, please select it as the '
                'SPM from the engine or run this tool on the SPM host.'
            )
        )
        raise RuntimeError(
            _('Cannot run the upgrade tool if the host is not the SPM')
        )
    else:
        self.logger.info(_('This upgrade tool is running on the SPM host'))
def _wait_datacenter_up(self):
    """Wait until both the datacenter and this host are 'up' in the
    engine after the restore.

    In interactive mode the user may choose to proceed anyway; in
    non-interactive mode a down host is a fatal error.
    """
    engine_api = engineapi.get_engine_api(self)
    my_host_id = None
    my_host_uuid = self._get_host_uuid()
    for h in engine_api.hosts.list():
        if h.get_hardware_information().get_uuid() == my_host_uuid:
            my_host_id = h.get_id()
    if not my_host_id:
        # Fix: the original 'raise(_("..."))' raised a plain string,
        # which is a TypeError in Python 2.6+ instead of the intended
        # error; wrap it in RuntimeError like the rest of the file.
        raise RuntimeError(_(
            'Unable to find this host in the engine, '
            'please check the backup recovery'
        ))
    host_broker = engine_api.hosts.get(id=my_host_id)
    cluster_broker = engine_api.clusters.get(
        id=host_broker.get_cluster().get_id()
    )
    dc_broker = engine_api.datacenters.get(
        id=cluster_broker.get_data_center().get_id()
    )
    ready = False
    # interactive only when the user has not pre-answered the
    # upgrade-success confirmation
    interactive = self.environment[
        ohostedcons.Upgrade.CONFIRM_UPGRADE_SUCCESS
    ] is None
    while not ready:
        dc_broker = dc_broker.update()
        host_broker = host_broker.update()
        dc_status = dc_broker.get_status().state
        host_status = host_broker.get_status().state
        if not (dc_status == 'up' and host_status == 'up'):
            if interactive:
                rcontinue = self.dialog.queryString(
                    name=ohostedcons.Confirms.UPGRADE_PROCEED,
                    note=_(
                        'The datacenter or this host is still marked as '
                        'down.\nPlease check engine logs to ensure that '
                        'everything is fine.\n '
                        'Are you sure you want to continue? '
                        '(@VALUES@)[@DEFAULT@]: '
                    ),
                    prompt=True,
                    validValues=(_('Yes'), _('No')),
                    caseSensitive=False,
                    default=_('Yes')
                ) == _('Yes').lower()
                if not rcontinue:
                    raise otopicontext.Abort('Aborted by user')
            else:
                raise RuntimeError(
                    _(
                        'This host is not active in the engine '
                        'after the restore'
                    )
                )
        else:
            ready = True
    engine_api.disconnect()
def _create_disk(self):
    """Create the disk for the new engine VM appliance on the
    hosted-engine storage domain and switch the environment to it,
    remembering the previous image/volume UUIDs.
    """
    engine_api = engineapi.get_engine_api(self)
    sd_list = params.StorageDomains(
        storage_domain=[
            engine_api.storagedomains.get(
                id=str(self.environment[ohostedcons.StorageEnv.SD_UUID])
            )
        ]
    )
    appliance_version = self.environment[
        ohostedcons.VMEnv.APPLIANCE_VERSION
    ]
    if appliance_version:
        description = 'hosted-engine-{v}'.format(v=appliance_version)
    else:
        description = 'hosted-engine'
    size_bytes = int(
        self.environment[ohostedcons.StorageEnv.IMAGE_SIZE_GB]
    ) * 1024 * 1024 * 1024
    disk_broker = engine_api.disks.add(params.Disk(
        name='virtio-disk0',
        description=description,
        comment=description,
        alias='virtio-disk0',
        storage_domains=sd_list,
        size=size_bytes,
        interface='virtio',
        format='raw',
        sparse=False,
        bootable=True,
    ))
    img_id = disk_broker.get_id()
    vol_id = disk_broker.get_image_id()
    self.logger.debug('vol: {v}'.format(v=vol_id))
    self.logger.debug('img: {v}'.format(v=img_id))
    if not self._wait_disk_ready(engine_api, img_id, False):
        raise RuntimeError(_(
            'Failed creating the new engine VM disk'
        ))
    # keep track of the previous disk before pointing the environment
    # at the freshly created one
    self.environment[
        ohostedcons.Upgrade.PREV_IMG_UUID
    ] = self.environment[ohostedcons.StorageEnv.IMG_UUID]
    self.environment[
        ohostedcons.Upgrade.PREV_VOL_UUID
    ] = self.environment[ohostedcons.StorageEnv.VOL_UUID]
    self.environment[ohostedcons.StorageEnv.IMG_UUID] = img_id
    self.environment[ohostedcons.StorageEnv.VOL_UUID] = vol_id
def _create_disk(self):
    """Create a timestamped backup disk for the engine VM on the
    hosted-engine storage domain and record its image/volume UUIDs
    in the environment.
    """
    engine_api = engineapi.get_engine_api(self)
    now = time.localtime()
    sd_list = params.StorageDomains(
        storage_domain=[
            engine_api.storagedomains.get(
                id=str(self.environment[ohostedcons.StorageEnv.SD_UUID])
            )
        ]
    )
    # e.g. <prefix>20160102030405 — prefix comes from the constants
    description = '{p}{t}'.format(
        p=ohostedcons.Const.BACKUP_DISK_PREFIX,
        t=time.strftime("%Y%m%d%H%M%S", now),
    )
    size_bytes = int(
        self.environment[ohostedcons.Upgrade.BACKUP_SIZE_GB]
    ) * 1024 * 1024 * 1024
    disk_broker = engine_api.disks.add(params.Disk(
        name='virtio-disk0',
        description=description,
        comment=description,
        alias='virtio-disk0',
        storage_domains=sd_list,
        size=size_bytes,
        interface='virtio',
        format='raw',
        sparse=False,
        bootable=True,
    ))
    img_id = disk_broker.get_id()
    vol_id = disk_broker.get_image_id()
    self.logger.debug('vol: {v}'.format(v=vol_id))
    self.logger.debug('img: {v}'.format(v=img_id))
    if not self._wait_disk_ready(engine_api, img_id, False):
        raise RuntimeError(_(
            'Failed creating the new engine VM disk'
        ))
    self.environment[
        ohostedcons.Upgrade.BACKUP_IMG_UUID
    ] = img_id
    self.environment[
        ohostedcons.Upgrade.BACKUP_VOL_UUID
    ] = vol_id
    # NOTE(review): set_active(False) mutates the local broker only and
    # no update() call follows — confirm the deactivation is actually
    # persisted on the engine side.
    engine_api.disks.get(
        id=self.environment[ohostedcons.Upgrade.BACKUP_IMG_UUID]
    ).set_active(False)
def _closeup(self):
    """Register the new engine VM disk in the restored engine DB and
    attach it to the engine VM, then wait for the OVF_STORE refresh.
    """
    engine_api = engineapi.get_engine_api(self)
    sd_broker = engine_api.storagedomains.get(
        id=str(self.environment[ohostedcons.StorageEnv.SD_UUID])
    )
    # the new disk was created after the backup was taken, so the
    # restored DB still lists it as unregistered
    target_img_id = self.environment[ohostedcons.StorageEnv.IMG_UUID]
    new_he_disk = None
    for candidate in sd_broker.disks.list(unregistered=True):
        candidate_id = candidate.get_id()
        self.logger.debug(
            'unregistered disk: {id}'.format(id=candidate_id))
        if candidate_id == target_img_id:
            self.logger.debug('found the new engine VM disk')
            new_he_disk = candidate
    if new_he_disk is None:
        raise RuntimeError(_('Unable to find the new engine VM disk'))
    self.logger.info(_('Registering the new hosted-engine disk in the DB'))
    new_disk_broker = sd_broker.disks.add(new_he_disk, unregistered=True)
    if not self._wait_disk_ready(
        engine_api,
        new_disk_broker.get_id(),
        True,
    ):
        raise RuntimeError(_(
            'Failed registering the new engine VM disk'
        ))
    engine_vm = engine_api.vms.get(
        id=str(self.environment[
            ohostedcons.VMEnv.VM_UUID
        ])
    )
    engine_vm.set_disks([new_disk_broker, ])
    engine_vm.update()
    # TODO: force OVF_STORE update!!!
    # it will require SDK4 and https://gerrit.ovirt.org/#/c/54537/
    self.logger.warning(
        "FIXME: please reduce the OVF_STORE update timeout with "
        "'engine-config -s OvfUpdateIntervalInMinutes=1', this script "
        "will wait 5 minutes."
    )
    time.sleep(300)
    engine_api.disconnect()
def _closeup(self):
    """Add this host to the engine: select/validate a cluster, tag the
    management network with the local VLAN when needed, register the
    host over SSH, and finally align the cluster CPU level.

    NOTE(review): comments only; code behavior untouched.
    """
    # TODO: refactor into shorter and simpler functions
    self._getSSH()
    self._configureHostDeploy()
    cluster_name = None
    default_cluster_name = 'Default'
    engine_api = engineapi.get_engine_api(self)
    added_to_cluster = False
    # Retry loop: on an engine RequestError the user is kept in a
    # manual-fix dialog and the whole add-host attempt is repeated.
    while not added_to_cluster:
        try:
            cluster_name = self.environment[
                ohostedcons.EngineEnv.HOST_CLUSTER_NAME]
            if not self.environment[ohostedcons.EngineEnv.APP_HOST_NAME]:
                # default the name shown in the engine to the local
                # hostname
                self.environment[ohostedcons.EngineEnv.
                                 APP_HOST_NAME] = socket.gethostname()
            self.logger.debug(
                "Getting the list of available clusters via engine's APIs")
            if cluster_name is not None:
                # a cluster was preselected: it must exist on the engine
                if cluster_name not in [
                    c.get_name() for c in engine_api.clusters.list()
                ]:
                    raise RuntimeError(
                        _('Specified cluster does not exist: {cluster}').
                        format(cluster=cluster_name, ))
            else:
                # no preselected cluster: default to 'Default' when
                # present, otherwise the first one; prompt only when
                # there is a real choice to make
                cluster_l = [
                    c.get_name() for c in engine_api.clusters.list()
                ]
                cluster_name = (default_cluster_name
                                if default_cluster_name in cluster_l
                                else cluster_l[0])
                if len(cluster_l) > 1:
                    cluster_name = self.dialog.queryString(
                        name='cluster_name',
                        note=_('Enter the name of the cluster to which '
                               'you want to add the host (@VALUES@) '
                               '[@DEFAULT@]: '),
                        prompt=True,
                        default=cluster_name,
                        validValues=cluster_l,
                    )
                self.environment[
                    ohostedcons.EngineEnv.HOST_CLUSTER_NAME] = cluster_name
            cluster = engine_api.clusters.get(cluster_name)
            conn = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
            caps = vds_info.capabilities(conn)
            bridge_port = self.environment[
                ohostedcons.NetworkEnv.BRIDGE_IF]
            if bridge_port in caps['vlans']:
                # the bridge sits on a VLAN device: tag the engine's
                # management network accordingly before adding the host
                self.logger.debug(
                    "Updating engine's management network to be vlanned")
                vlan_id = caps['vlans'][bridge_port]['vlanid']
                self.logger.debug(
                    "Getting engine's management network via engine's APIs"
                )
                cluster_mgmt_network = cluster.networks.get(
                    name=self.environment[
                        ohostedcons.NetworkEnv.BRIDGE_NAME])
                mgmt_network_id = cluster_mgmt_network.get_id()
                mgmt_network = engine_api.networks.get(id=mgmt_network_id)
                mgmt_network.set_vlan(
                    self._ovirtsdk_xml.params.VLAN(id=vlan_id))
                mgmt_network.update()
                self._wait_network_vlan_ready(
                    engine_api, mgmt_network_id, vlan_id)
            self.logger.debug('Adding the host to the cluster')
            engine_api.hosts.add(
                self._ovirtsdk_xml.params.Host(
                    name=self.environment[
                        ohostedcons.EngineEnv.APP_HOST_NAME],
                    # Note that the below is required for compatibility
                    # with vdsm-generated pki. See bz 1178535.
                    address=self.environment[
                        ohostedcons.NetworkEnv.HOST_NAME],
                    cluster=cluster,
                    ssh=self._ovirtsdk_xml.params.SSH(
                        authentication_method='publickey',
                        port=self.environment[
                            ohostedcons.NetworkEnv.SSHD_PORT],
                    ),
                    override_iptables=self.environment[
                        otopicons.NetEnv.IPTABLES_ENABLE],
                ))
            added_to_cluster = True
        except ovirtsdk.infrastructure.errors.RequestError as e:
            self.logger.debug(
                'Cannot add the host to cluster {cluster}'.format(
                    cluster=cluster_name,
                ),
                exc_info=True,
            )
            self.logger.error(
                _('Cannot automatically add the host '
                  'to cluster {cluster}:\n{details}\n').format(
                    cluster=cluster_name,
                    details=e.detail))
            # keep the user in a manual-fix loop until the engine is
            # reachable again, then retry the whole add-host flow
            while not check_liveliness.manualSetupDispatcher(
                    self,
                    check_liveliness.MSD_FURTHER_ACTIONS,
                    self.environment[
                        ohostedcons.NetworkEnv.OVIRT_HOSTED_ENGINE_FQDN]):
                pass
    up = self._wait_host_ready(
        engine_api,
        self.environment[ohostedcons.EngineEnv.APP_HOST_NAME])
    # TODO: host-deploy restarted vdscli so we need to
    # connect again
    if not up:
        self.logger.error(
            _('Unable to add {host} to the manager').format(
                host=self.environment[
                    ohostedcons.EngineEnv.APP_HOST_NAME],
            ))
    else:
        # This works only if the host is up.
        self.logger.debug('Setting CPU for the cluster')
        try:
            cluster, cpu = self._wait_cluster_cpu_ready(
                engine_api, cluster_name)
            self.logger.debug(cpu.__dict__)
            cpu.set_id(self.environment[ohostedcons.VDSMEnv.ENGINE_CPU])
            cluster.set_cpu(cpu)
            cluster.update()
        except ovirtsdk.infrastructure.errors.RequestError as e:
            self.logger.debug(
                'Cannot set CPU level of cluster {cluster}'.format(
                    cluster=cluster_name,
                ),
                exc_info=True,
            )
            self.logger.error(
                _('Cannot automatically set CPU level '
                  'of cluster {cluster}:\n{details}\n').format(
                    cluster=cluster_name,
                    details=e.detail))
    engine_api.disconnect()
def _wait_datacenter_up(self):
    """Wait until the datacenter and this host are 'up' in the engine,
    then ask the user to confirm the final engine-VM disk switch.

    In interactive mode a down state only prompts for confirmation;
    in non-interactive mode it is fatal.
    """
    engine_api = engineapi.get_engine_api(self)
    cluster_broker = engine_api.clusters.get(
        name=self.environment[
            ohostedcons.EngineEnv.HOST_CLUSTER_NAME
        ]
    )
    dc_broker = engine_api.datacenters.get(
        id=cluster_broker.get_data_center().get_id()
    )
    my_host_id = None
    my_host_uuid = self._get_host_uuid()
    for h in engine_api.hosts.list():
        if h.get_hardware_information().get_uuid() == my_host_uuid:
            my_host_id = h.get_id()
    if not my_host_id:
        # Fix: the original 'raise(_("..."))' raised a plain string,
        # which is a TypeError in Python 2.6+ instead of the intended
        # error; wrap it in RuntimeError like the rest of the file.
        raise RuntimeError(_(
            'Unable to find this host in the engine, '
            'please check the backup recovery'
        ))
    host_broker = engine_api.hosts.get(id=my_host_id)
    ready = False
    interactive = self.environment[
        ohostedcons.Upgrade.CONFIRM_DISK_SWITCH
    ] is None
    while not ready:
        dc_broker = dc_broker.update()
        host_broker = host_broker.update()
        dc_status = dc_broker.get_status().state
        host_status = host_broker.get_status().state
        if not (dc_status == 'up' and host_status == 'up'):
            if interactive:
                rcontinue = self.dialog.queryString(
                    name=ohostedcons.Confirms.UPGRADE_PROCEED,
                    note=_(
                        'The datacenter or this host is still marked as '
                        'down.\nPlease ensure that everything is ready '
                        'before definitively switching the disk of the '
                        'engine VM.\n'
                        'Are you sure you want to continue? '
                        '(@VALUES@)[@DEFAULT@]: '
                    ),
                    prompt=True,
                    validValues=(_('Yes'), _('No')),
                    caseSensitive=False,
                    default=_('Yes')
                ) == _('Yes').lower()
                if not rcontinue:
                    raise otopicontext.Abort('Aborted by user')
            else:
                raise RuntimeError(
                    _(
                        'This host is not active in the engine '
                        'after the restore'
                    )
                )
        else:
            ready = True
    engine_api.disconnect()
    if self.environment[
        ohostedcons.Upgrade.CONFIRM_DISK_SWITCH
    ] is None:
        self.environment[
            ohostedcons.Upgrade.CONFIRM_DISK_SWITCH
        ] = self.dialog.queryString(
            name=ohostedcons.Confirms.UPGRADE_PROCEED,
            note=_(
                'The engine VM is currently running with the new disk but '
                'the hosted-engine configuration is still point to the '
                'old one.\nPlease make sure that everything is fine on '
                'the engine VM side before definitively switching the '
                'disks.\n'
                'Are you sure you want to continue? '
                '(@VALUES@)[@DEFAULT@]: '
            ),
            prompt=True,
            validValues=(_('Yes'), _('No')),
            caseSensitive=False,
            default=_('Yes')
        ) == _('Yes').lower()
    if not self.environment[
        ohostedcons.Upgrade.CONFIRM_DISK_SWITCH
    ]:
        raise otopicontext.Abort('Aborted by user')
def _validate_authz(self, files_tar):
    """Validate the AAA extension configuration found in the engine
    backup tarball against the authz domains the engine reports.

    :param files_tar: tarfile object of the engine backup
    :return: True when the configuration looks consistent, False when
             a plugin properties file is unreadable/invalid
    :raises RuntimeError: if the engine still has legacy (kerberos-ldap)
            AAA domains not covered by extension-based plugins
    """
    self.logger.info(_("Validating authentication plugins"))
    authz_ext = set([])
    flist = files_tar.getmembers()
    self.logger.debug('Content:')
    self.logger.debug([f.name for f in flist])
    authplist = [
        f for f in flist
        if f.isfile() and
        'etc/ovirt-engine/extensions.d' in f.name and
        f.name.endswith('.properties')
    ]
    self.logger.debug('Configured plugins:')
    self.logger.debug([ap.name for ap in authplist])
    for authp in authplist:
        authp_file = files_tar.extractfile(authp)
        # java-style properties are close enough to INI to reuse
        # configparser, but a section header must be prepended
        auth_f_str = '[section]\n' + authp_file.read()
        auth_fp = StringIO(unicode(auth_f_str))
        config = configparser.RawConfigParser()
        try:
            config.readfp(auth_fp)
        except configparser.Error as ex:
            # fixed typo 'seams' -> 'seems'; use authp.name so the
            # message shows the file path instead of a TarInfo repr
            msg = _(
                'The extension configuration file \'{authp}\' inside '
                'the backup seems invalid, '
                'please check its content on the engine VM and fix: {ex}'
            ).format(authp=authp.name, ex=ex)
            self.logger.error(msg)
            return False
        if (config.has_section('section') and config.has_option(
                'section', 'ovirt.engine.extension.provides') and
                config.has_option('section', 'ovirt.engine.extension.name')):
            provides = config.get(
                'section', 'ovirt.engine.extension.provides')
            name = config.get('section', 'ovirt.engine.extension.name')
            self.logger.debug('Extension {n} provides {p}'.format(
                n=name, p=provides))
            if provides == 'org.ovirt.engine.api.extensions.aaa.Authz':
                authz_ext.add(name)
        else:
            msg = _('The extension configuration file \'{authp}\' inside '
                    'the backup seems invalid, '
                    'please check its content on the engine VM and fix.'
                    ).format(authp=authp.name, )
            self.logger.error(msg)
            return False
    self.logger.debug(
        'Authz extensions configured on fs: {l}'.format(l=authz_ext))
    engine_api = engineapi.get_engine_api(self)
    eng_authz_domains = set(
        [d.get_name() for d in engine_api.domains.list()])
    self.logger.debug('Authz domains configured on the engine: {l}'.format(
        l=eng_authz_domains))
    # strict superset: every engine-side domain must be backed by an
    # extension-based plugin found in the backup
    if eng_authz_domains > authz_ext:
        to_be_fixed = eng_authz_domains - authz_ext
        msg = _('{tbf}: such AAA domains are still configured in a '
                'deprecated way that is not compatible with the current '
                'release; please upgrade them to ovirt-engine-extension '
                'mechanism before proceeding.').format(
            tbf=[d for d in to_be_fixed],
        )
        self.logger.error(msg)
        raise RuntimeError('Unsupported AAA mechanism')
    return True
def _closeup(self):
    """Add this host to the engine: select/validate a cluster, tag the
    management network with the local VLAN when needed, register the
    host over SSH, and finally align the cluster CPU level.

    NOTE(review): comments only; code behavior untouched.
    """
    # TODO: refactor into shorter and simpler functions
    self._getSSH()
    self._configureHostDeploy()
    cluster_name = None
    default_cluster_name = 'Default'
    engine_api = engineapi.get_engine_api(self)
    added_to_cluster = False
    # Retry loop: on an engine RequestError the user is kept in a
    # manual-fix dialog and the whole add-host attempt is repeated.
    while not added_to_cluster:
        try:
            cluster_name = self.environment[
                ohostedcons.EngineEnv.HOST_CLUSTER_NAME
            ]
            self.logger.debug(
                "Getting the list of available clusters via engine's APIs"
            )
            if cluster_name is not None:
                # a cluster was preselected: it must exist on the engine
                if cluster_name not in [
                    c.get_name()
                    for c in engine_api.clusters.list()
                ]:
                    raise RuntimeError(
                        _(
                            'Specified cluster does not exist: {cluster}'
                        ).format(
                            cluster=cluster_name,
                        )
                    )
            else:
                # no preselected cluster: default to 'Default' when
                # present, otherwise the first one; prompt only when
                # there is a real choice to make
                cluster_l = [
                    c.get_name()
                    for c in engine_api.clusters.list()
                ]
                cluster_name = (
                    default_cluster_name if default_cluster_name in
                    cluster_l else cluster_l[0]
                )
                if len(cluster_l) > 1:
                    cluster_name = self.dialog.queryString(
                        name='cluster_name',
                        note=_(
                            'Enter the name of the cluster to which '
                            'you want to add the host (@VALUES@) '
                            '[@DEFAULT@]: '
                        ),
                        prompt=True,
                        default=cluster_name,
                        validValues=cluster_l,
                    )
                self.environment[
                    ohostedcons.EngineEnv.HOST_CLUSTER_NAME
                ] = cluster_name
            cluster = engine_api.clusters.get(cluster_name)
            conn = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
            net_info = CachingNetInfo(vds_info.capabilities(conn))
            bridge_port = self.environment[
                ohostedcons.NetworkEnv.BRIDGE_IF
            ]
            if bridge_port in net_info.vlans:
                # the bridge sits on a VLAN device: tag the engine's
                # management network accordingly before adding the host
                self.logger.debug(
                    "Updating engine's management network to be vlanned"
                )
                vlan_id = net_info.vlans[bridge_port]['vlanid']
                self.logger.debug(
                    "Getting engine's management network via engine's APIs"
                )
                cluster_mgmt_network = cluster.networks.get(
                    name=self.environment[
                        ohostedcons.NetworkEnv.BRIDGE_NAME]
                )
                mgmt_network_id = cluster_mgmt_network.get_id()
                mgmt_network = engine_api.networks.get(
                    id=mgmt_network_id
                )
                mgmt_network.set_vlan(
                    self._ovirtsdk_xml.params.VLAN(id=vlan_id)
                )
                mgmt_network.update()
                self._wait_network_vlan_ready(
                    engine_api,
                    mgmt_network_id,
                    vlan_id
                )
            self.logger.debug('Adding the host to the cluster')
            engine_api.hosts.add(
                self._ovirtsdk_xml.params.Host(
                    name=self.environment[
                        ohostedcons.EngineEnv.APP_HOST_NAME
                    ],
                    # Note that the below is required for compatibility
                    # with vdsm-generated pki. See bz 1178535.
                    address=self.environment[
                        ohostedcons.NetworkEnv.HOST_NAME
                    ],
                    cluster=cluster,
                    ssh=self._ovirtsdk_xml.params.SSH(
                        authentication_method='publickey',
                        port=self.environment[
                            ohostedcons.NetworkEnv.SSHD_PORT
                        ],
                    ),
                    override_iptables=self.environment[
                        otopicons.NetEnv.IPTABLES_ENABLE
                    ],
                )
            )
            added_to_cluster = True
        except ovirtsdk.infrastructure.errors.RequestError as e:
            self.logger.debug(
                'Cannot add the host to cluster {cluster}'.format(
                    cluster=cluster_name,
                ),
                exc_info=True,
            )
            self.logger.error(
                _(
                    'Cannot automatically add the host '
                    'to cluster {cluster}:\n{details}\n'
                ).format(
                    cluster=cluster_name,
                    details=e.detail
                )
            )
            # keep the user in a manual-fix loop until the engine is
            # reachable again, then retry the whole add-host flow
            while not check_liveliness.manualSetupDispatcher(
                self,
                check_liveliness.MSD_FURTHER_ACTIONS,
                self.environment[
                    ohostedcons.NetworkEnv.OVIRT_HOSTED_ENGINE_FQDN
                ]
            ):
                pass
    up = self._wait_host_ready(
        engine_api,
        self.environment[ohostedcons.EngineEnv.APP_HOST_NAME]
    )
    # TODO: host-deploy restarted vdscli so we need to
    # connect again
    if not up:
        self.logger.error(
            _(
                'Unable to add {host} to the manager'
            ).format(
                host=self.environment[
                    ohostedcons.EngineEnv.APP_HOST_NAME
                ],
            )
        )
    else:
        # This works only if the host is up.
        self.logger.debug('Setting CPU for the cluster')
        try:
            cluster, cpu = self._wait_cluster_cpu_ready(
                engine_api,
                cluster_name
            )
            self.logger.debug(cpu.__dict__)
            cpu.set_id(
                self.environment[ohostedcons.VDSMEnv.ENGINE_CPU]
            )
            cluster.set_cpu(cpu)
            cluster.update()
        except ovirtsdk.infrastructure.errors.RequestError as e:
            self.logger.debug(
                'Cannot set CPU level of cluster {cluster}'.format(
                    cluster=cluster_name,
                ),
                exc_info=True,
            )
            self.logger.error(
                _(
                    'Cannot automatically set CPU level '
                    'of cluster {cluster}:\n{details}\n'
                ).format(
                    cluster=cluster_name,
                    details=e.detail
                )
            )
    engine_api.disconnect()
def _check_upgrade_requirements(self):
    """Run the pre-upgrade checks: hosted-engine config level, host
    metadata area format, cluster/datacenter compatibility levels, and
    record the engine product version in self._e_version.

    Raises RuntimeError on any unsupported element.
    """
    self.logger.info('Checking version requirements')
    upg = upgrade.Upgrade()
    if not upg.is_conf_file_uptodate():
        self.logger.error(
            _('Hosted-engine configuration didn\'t correctly reach 3.6 '
              'level. Please successfully complete the upgrade to '
              '3.6 before proceeding with this upgrade. '))
        raise RuntimeError(
            _('Unsupported hosted-engine configuration level'))
    self.logger.info(_('Checking metadata area'))
    vmstatus = vm_status.VmStatus()
    status = vmstatus.get_status()
    self.logger.debug('hosted-engine-status: {s}'.format(s=status))
    old_metadata = False
    for h in status['all_host_stats']:
        # hosts upgraded to 3.6 report a 'stopped' key; its absence
        # means a pre-3.6 metadata format
        if 'stopped' not in status['all_host_stats'][h]:
            self.logger.error(
                _('Metadata for host {h} is incompatible with this tool.\n'
                  'Before proceeding with this upgrade, '
                  'please correctly upgrade it to 3.6 '
                  'or clean its metadata area with\n'
                  ' \'hosted-engine --clean-metadata --host-id={id}\'\n'
                  'if decommissioned or not anymore involved in HE.').
                format(
                    h=status['all_host_stats'][h]['hostname'],
                    id=status['all_host_stats'][h]['host-id'],
                ))
            old_metadata = True
    if old_metadata:
        raise RuntimeError(_('Host with unsupported metadata area'))
    self.logger.info(
        _('Hosted-engine configuration is at a compatible level'))
    engine_api = engineapi.get_engine_api(self)
    self.logger.debug('Successfully connected to the engine')
    elements = engine_api.clusters.list() + engine_api.datacenters.list()
    for e in elements:
        if isinstance(e, brokers.DataCenter):
            element_t = 'datacenter'
        else:
            element_t = 'cluster'
        version = e.get_version()
        release = '{ma}.{mi}'.format(
            ma=version.major,
            mi=version.minor,
        )
        if release not in ohostedcons.Const.UPGRADE_REQUIRED_CLUSTER_V:
            self.logger.error(
                _('{t} {name} is at version {release} which is not '
                  'supported by this upgrade flow. '
                  'Please fix it before upgrading.').format(
                    t=element_t.title(),
                    name=e.get_name(),
                    release=release,
                ))
            # translate the msgid first, then interpolate: calling
            # format() inside _() defeats the gettext catalog lookup
            raise RuntimeError(
                _('Unsupported {t} level').format(t=element_t))
    self.logger.info(
        _('All the datacenters and clusters are at a compatible level'))
    e_major = engine_api.get_product_info().version.major
    e_minor = engine_api.get_product_info().version.minor
    if not e_major:
        # just for compatibility
        # see: bz#1405386
        e_major = engine_api.get_product_info().get_version().major
        e_minor = engine_api.get_product_info().get_version().minor
    if e_major is not None and e_minor is not None:
        self._e_version = '{ma}.{mi}'.format(
            ma=e_major,
            mi=e_minor,
        )
def _check_sd_and_disk_space(self):
    """Verify free space on the hosted-engine storage domain for the
    backup disk, and check whether the current engine VM disk is big
    enough for the new appliance (offering a resize when it is not).

    Raises RuntimeError when space is insufficient and the user
    declines (or cannot be asked) to resize.
    """
    engine_api = engineapi.get_engine_api(self)
    self.logger.debug('Successfully connected to the engine')
    sd_broker = engine_api.storagedomains.get(
        id=str(self.environment[ohostedcons.StorageEnv.SD_UUID]))
    if not sd_broker:
        raise RuntimeError(
            _('Unable to find the hosted-engine storage domain in the engine'
              ))
    available = sd_broker.get_available()
    # fixed typo in the debug message ('availalbe') and reuse the value
    # already fetched instead of issuing a second API call
    self.logger.debug('available: {a}'.format(a=available))
    available_gib = available / 1024 / 1024 / 1024
    engine_api.disconnect()
    required_gib = int(
        self.environment[ohostedcons.StorageEnv.IMAGE_SIZE_GB])
    if required_gib > available_gib:
        self.logger.error(
            _('On the hosted-engine storage domain there is not enough '
              'available space to create a new disk for backup '
              'purposes and eventually extend the current disk to '
              'fit the new appliance: '
              'required {r}GiB - available {a}GiB. '
              'Please extend the hosted-engine storage domain.').format(
                r=required_gib,
                a=available_gib,
            ))
        raise RuntimeError(
            _('Not enough free space on the hosted-engine storage domain'))
    else:
        self.logger.info(
            _('The hosted-engine storage domain has enough free space to '
              'contain a new backup disk.'))
    if int(self.environment[ohostedcons.StorageEnv.IMAGE_SIZE_GB]) > int(
            self.environment[ohostedcons.Upgrade.BACKUP_SIZE_GB]):
        # current engine VM disk is smaller than the new appliance
        # needs: offer (or require) a resize
        self.logger.warning(
            _('On the hosted-engine disk there is not enough '
              'available space to fit the new appliance '
              'disk: '
              'required {r}GiB - available {a}GiB. ').format(
                r=self.environment[ohostedcons.StorageEnv.IMAGE_SIZE_GB],
                a=self.environment[ohostedcons.Upgrade.BACKUP_SIZE_GB],
            ))
        interactive = self.environment[
            ohostedcons.Upgrade.CONFIRM_UPGRADE_DISK_RESIZE] is None
        if interactive:
            self.environment[
                ohostedcons.Upgrade.CONFIRM_UPGRADE_DISK_RESIZE
            ] = self.dialog.queryString(
                name=ohostedcons.Confirms.UPGRADE_DISK_RESIZE_PROCEED,
                note=_(
                    'This upgrade tool can resize the hosted-engine VM '
                    'disk; before resizing a backup will be created.\n '
                    'Are you sure you want to continue? '
                    '(@VALUES@)[@DEFAULT@]: '),
                prompt=True,
                validValues=(_('Yes'), _('No')),
                caseSensitive=False,
                default=_('Yes')) == _('Yes').lower()
        if self.environment[
            ohostedcons.Upgrade.CONFIRM_UPGRADE_DISK_RESIZE
        ]:
            self.environment[ohostedcons.Upgrade.EXTEND_VOLUME] = True
        else:
            raise RuntimeError(
                _('Not enough free space on the hosted-engine disk, '
                  'please extend it'))
    else:
        self.environment[ohostedcons.Upgrade.EXTEND_VOLUME] = False