def _closeup_reprepare_images(self):
    """Run prepareImage again on the hosted-engine images at closeup."""
    self.logger.debug(_("Preparing again HE images"))
    he_images = image.Image(
        self.environment[ohostedcons.StorageEnv.DOMAIN_TYPE],
        self.environment[ohostedcons.StorageEnv.SD_UUID],
    )
    he_images.prepare_images()
def _scan_images(self):
    """
    Scan for metadata, lockspace and configuration image uuids.

    Prepares the HE images first, then fetches the image list and
    hands each image uuid to _analyze_image().
    """
    # VDSM getImagesList doesn't work when the SD is not connected to
    # a storage pool so we cannot simply directly call getImagesList
    # see: https://bugzilla.redhat.com/1274622
    img = image.Image(
        self.environment[ohostedcons.StorageEnv.DOMAIN_TYPE],
        self.environment[ohostedcons.StorageEnv.SD_UUID],
    )
    img.prepare_images()
    images = img.get_images_list(self.cli)
    self.logger.debug("Existing images: {images}".format(images=images))
    # distinct loop variable: the original reused 'img', clobbering the
    # image.Image helper instance created above
    for img_uuid in images:
        self._analyze_image(img_uuid)
def _is_conf_volume_there(self):
    """
    It tries to detect the configuration volume since another host
    could create it before us.
    The detection is based on the configuration volume description
    which is hardcoded.
    Engine, lockspace and metadata images are excluded from the scan
    since we already know their content.

    :return: True if the configuration volume was found, False otherwise
    """
    self._log.info('Looking for conf volume')
    isconfvolume = False
    self._conf_imgUUID = None
    self._conf_volUUID = None
    img = image.Image(self._type, self._sdUUID)
    imageslist = img.get_images_list(self._cli)
    self._log.debug('found images: ' + str(imageslist))
    # excluding engine, metadata and lockspace images
    unknowimages = set(imageslist) - set([
        self._vm_img_uuid,
        self._metadata_imgUUID,
        self._lockspace_imgUUID
    ])
    self._log.debug('candidate images: ' + str(unknowimages))
    for img_uuid in unknowimages:
        try:
            volumeslist = self._cli.StorageDomain.getVolumes(
                imageID=img_uuid,
                storagepoolID=self._spUUID,
                storagedomainID=self._sdUUID,
            )
            self._log.debug(volumeslist)
        except ServerError as e:
            # avoid raising here since after a reboot we
            # didn't call prepareImage on all the possible images
            self._log.debug(
                'Error fetching volumes for {image}: {message}'.format(
                    # fixed: was 'image=image' which logged the imported
                    # image module object instead of the image uuid
                    image=img_uuid,
                    message=str(e),
                ))
            continue
        for vol_uuid in volumeslist:
            # any ServerError from getInfo here is fatal and gets
            # converted to RuntimeError by the context manager
            with self._server_error_to_runtime_error():
                volumeinfo = self._cli.Volume.getInfo(
                    volumeID=vol_uuid,
                    imageID=img_uuid,
                    storagepoolID=self._spUUID,
                    storagedomainID=self._sdUUID,
                )
            self._log.debug(volumeinfo)
            description = volumeinfo['description']
            if description == constants.CONF_IMAGE_DESC:
                self._conf_imgUUID = img_uuid
                self._conf_volUUID = vol_uuid
                isconfvolume = True
                self._log.info('Found conf volume: '
                               'imgUUID:{img}, volUUID:{vol}'.format(
                                   img=self._conf_imgUUID,
                                   vol=self._conf_volUUID,
                               ))
    if self._conf_imgUUID is None or self._conf_volUUID is None:
        self._log.error('Unable to find HE conf volume')
    return isconfvolume
def scan(self):
    """
    Scan every image on the HE storage domain looking for the OVF_STORE
    volume; when found, prepare it and record its path on the OVFStore
    class.

    :return: True if the OVF_STORE volume was found, False otherwise
    :raise RuntimeError: on any VDSM ServerError during the scan
    """
    self.clear_store_info()
    cli = util.connect_vdsm_json_rpc(logger=self._log,
                                     timeout=constants.VDSCLI_SSL_TIMEOUT)
    imgs = image.Image(self._type, self._sdUUID)
    imageslist = imgs.get_images_list(cli)
    for img_uuid in imageslist:
        try:
            volumeslist = cli.StorageDomain.getVolumes(
                imageID=img_uuid,
                storagepoolID=self._spUUID,
                storagedomainID=self._sdUUID,
            )
            self._log.debug(volumeslist)
        except ServerError as e:
            raise RuntimeError(str(e))
        for vol_uuid in volumeslist:
            try:
                volumeinfo = cli.Volume.getInfo(
                    volumeID=vol_uuid,
                    imageID=img_uuid,
                    storagepoolID=self._spUUID,
                    storagedomainID=self._sdUUID,
                )
                self._log.debug(volumeinfo)
            except ServerError as e:
                raise RuntimeError(str(e))
            description = volumeinfo['description']
            # cheap string checks first; only parse JSON when the
            # description looks like {"Disk Description": ...}
            if ('Disk Description' in description and
                    description[0] == '{' and
                    description[-1] == '}'):
                description_dict = json.loads(description)
                self._log.debug(description_dict)
                if description_dict['Disk Description'] == 'OVF_STORE':
                    self._log.info('Found OVF_STORE: '
                                   'imgUUID:{img}, volUUID:{vol}'.format(
                                       img=img_uuid,
                                       vol=vol_uuid,
                                   ))
                    # Prepare symlinks for the OVF store
                    try:
                        image_info = cli.Image.prepare(
                            storagepoolID=self._spUUID,
                            storagedomainID=self._sdUUID,
                            imageID=img_uuid,
                            volumeID=vol_uuid)
                        # stored on the class, not the instance, so the
                        # path is shared across all OVFStore instances
                        OVFStore._ovf_store_path = image_info["path"]
                    except ServerError as e:
                        raise RuntimeError(str(e))
    if self._ovf_store_path is None:
        # rate-limited via log_filter to avoid flooding the log while
        # the OVF_STORE has not been created yet
        self._log.warning('Unable to find OVF_STORE',
                          extra=log_filter.lf_args(LF_OVF_STORE_NOT_FOUND,
                                                   LF_OVF_LOG_DELAY))
        return False
    return True
def _choose_backup(self):
    """
    Scan the HE storage domain for backup disks and let the user pick one.

    A backup disk is recognized by a JSON volume description whose
    'DiskDescription' value starts with BACKUP_DISK_PREFIX.  The chosen
    image/volume uuid pair is saved in the environment under
    Upgrade.BACKUP_IMG_UUID / Upgrade.BACKUP_VOL_UUID.

    :raise RuntimeError: if no candidate backup disk is found
    """
    candidate_backup_volumes = []
    cli = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
    img = image.Image(
        self.environment[ohostedcons.StorageEnv.DOMAIN_TYPE],
        self.environment[ohostedcons.StorageEnv.SD_UUID],
    )
    img_list = img.get_images_list(cli)
    self.logger.debug('img list: {il}'.format(il=img_list))
    sdUUID = self.environment[ohostedcons.StorageEnv.SD_UUID]
    spUUID = ohostedcons.Const.BLANK_UUID
    index = 0
    # distinct loop variable: the original reused 'img', clobbering the
    # image.Image helper instance created above
    for img_uuid in img_list:
        try:
            volumeslist = cli.Volume.getList(
                imageID=img_uuid,
                storagepoolID=spUUID,
                storagedomainID=sdUUID,
            )
            self.logger.debug('volumeslist: {vl}'.format(vl=volumeslist))
        except ServerError as e:
            # avoid raising here, simply skip the unknown image
            self.logger.debug(
                'Error fetching volumes for {image}: {message}'.format(
                    # fixed: was 'image=image' which logged the imported
                    # image module object instead of the image uuid
                    image=img_uuid,
                    message=str(e),
                )
            )
            continue
        for vol_uuid in volumeslist:
            try:
                volumeinfo = cli.Volume.getInfo(
                    volumeID=vol_uuid,
                    imageID=img_uuid,
                    storagepoolID=spUUID,
                    storagedomainID=sdUUID,
                )
                self.logger.debug(volumeinfo)
            except ServerError as e:
                # avoid raising here, simply skip the unknown volume
                self.logger.debug(
                    (
                        'Error fetching volume info '
                        'for {volume}: {message}'
                    ).format(
                        volume=vol_uuid,
                        message=str(e),
                    )
                )
                continue
            disk_description = ''
            try:
                jd = json.loads(volumeinfo['description'])
                disk_description = jd['DiskDescription']
            except (ValueError, KeyError):
                # not a JSON description, or no DiskDescription key:
                # not a backup disk, keep the empty description
                pass
            if disk_description.startswith(
                ohostedcons.Const.BACKUP_DISK_PREFIX
            ):
                candidate_backup_volumes.append({
                    'index': index + 1,
                    'description': disk_description,
                    'img_uuid': img_uuid,
                    'vol_uuid': vol_uuid,
                })
                index += 1
    if not candidate_backup_volumes:
        self.logger.error(_(
            'Unable to find any backup disk: please ensure that a backup '
            'has been correctly created during a previous upgrade attempt'
        ))
        raise RuntimeError(_('No available backup disk'))
    bd_list = ''
    for entry in candidate_backup_volumes:
        bd_list += _(
            '\t[{i}] - {description}\n'
        ).format(
            i=entry['index'],
            description=entry['description'],
        )
    self.dialog.note(
        _(
            'The following backup disk have been '
            'found on your system:\n'
            '{bd_list}'
        ).format(
            bd_list=bd_list,
        )
    )
    sdisk = self.dialog.queryString(
        name='OVEHOSTED_RB_BACKUP_DISK',
        note=_(
            'Please select one of them '
            '(@VALUES@) [@DEFAULT@]: '
        ),
        prompt=True,
        caseSensitive=True,
        default='1',
        validValues=[
            str(i + 1) for i in range(len(candidate_backup_volumes))
        ],
    )
    selected_disk = candidate_backup_volumes[int(sdisk) - 1]
    self.environment[
        ohostedcons.Upgrade.BACKUP_IMG_UUID
    ] = selected_disk['img_uuid']
    self.environment[
        ohostedcons.Upgrade.BACKUP_VOL_UUID
    ] = selected_disk['vol_uuid']
def _validate_lm_volumes(self):
    """
    This method, if the relevant uuids aren't in the initial answerfile,
    will look for lockspace and metadata volumes on the shared storage
    identifying them by their description.
    We need to re-scan each time we run the upgrade flow since they
    could have been created in a previous upgrade attempt.
    If the volumes are not on disk, it triggers volume creation as for
    fresh deployments; volume creation code will also remove the
    previous file and create a new symlink to the volume using the
    same file name.

    :raise otopicontext.Abort: if the user declines the upgrade
    """
    self.logger.info(_('Scanning for lockspace and metadata volumes'))
    cli = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
    img = image.Image(
        self.environment[ohostedcons.StorageEnv.DOMAIN_TYPE],
        self.environment[ohostedcons.StorageEnv.SD_UUID],
    )
    img_list = img.get_images_list(cli)
    self.logger.debug('img list: {il}'.format(il=img_list))
    sdUUID = self.environment[ohostedcons.StorageEnv.SD_UUID]
    spUUID = ohostedcons.Const.BLANK_UUID
    for img in img_list:
        try:
            volumeslist = cli.StorageDomain.getVolumes(
                imageID=img,
                storagepoolID=spUUID,
                storagedomainID=sdUUID,
            )
            self.logger.debug('volumeslist: {vl}'.format(vl=volumeslist))
        except ServerError as e:
            # avoid raising here, simply skip the unknown image
            self.logger.debug(
                'Error fetching volumes for {image}: {message}'.format(
                    image=img,
                    message=str(e),
                ))
            continue
        for vol_uuid in volumeslist['items']:
            try:
                volumeinfo = cli.Volume.getInfo(
                    volumeID=vol_uuid,
                    imageID=img,
                    storagepoolID=spUUID,
                    storagedomainID=sdUUID,
                )
                self.logger.debug(volumeinfo)
            except ServerError as e:
                # avoid raising here, simply skip the unknown volume
                self.logger.debug(('Error fetching volume info '
                                   'for {volume}: {message}').format(
                    volume=vol_uuid,
                    message=str(e),
                ))
                continue
            # both volumes are identified by description only; NOTE that
            # the metadata description is intentionally also derived from
            # LOCKSPACE_NAME ('<name>.metadata')
            disk_description = volumeinfo['description']
            if disk_description == self.environment[
                    ohostedcons.SanlockEnv.LOCKSPACE_NAME] + '.lockspace':
                self.environment[ohostedcons.StorageEnv.
                                 LOCKSPACE_VOLUME_UUID] = vol_uuid
                self.environment[
                    ohostedcons.StorageEnv.LOCKSPACE_IMAGE_UUID] = img
            elif disk_description == self.environment[
                    ohostedcons.SanlockEnv.LOCKSPACE_NAME] + '.metadata':
                self.environment[
                    ohostedcons.StorageEnv.METADATA_VOLUME_UUID] = vol_uuid
                self.environment[
                    ohostedcons.StorageEnv.METADATA_IMAGE_UUID] = img
    # nothing to do if both volumes are already there
    if (self.environment[ohostedcons.StorageEnv.LOCKSPACE_VOLUME_UUID] and
            self.environment[ohostedcons.StorageEnv.METADATA_VOLUME_UUID]):
        self.logger.info(
            _('Lockspace and metadata volumes are already on the '
              'HE storage domain'))
        return
    interactive = self.environment[
        ohostedcons.Upgrade.LM_VOLUMES_UPGRADE_PROCEED] is None
    if interactive:
        # NOTE(review): compares against _('Yes').lower() — presumably
        # queryString returns a lowercased value when caseSensitive is
        # False; confirm against the dialog implementation
        self.environment[
            ohostedcons.Upgrade.
            LM_VOLUMES_UPGRADE_PROCEED] = self.dialog.queryString(
                name=ohostedcons.Confirms.LM_VOLUMES_UPGRADE_PROCEED,
                note=_(
                    'This system was initially deployed with oVirt 3.4 '
                    'using file based metadata and lockspace area.\n'
                    'Now you have to upgrade to up to date structure '
                    'using this tool.\n'
                    'In order to do that please manually stop ovirt-ha-agent '
                    'and ovirt-ha-broker on all the other HE hosts '
                    '(but not this one). '
                    'At the end you of this procedure you can simply '
                    'manually upgrade ovirt-hosted-engine-ha and '
                    'restart ovirt-ha-agent and ovirt-ha-broker on all '
                    'the hosted-engine hosts.\n'
                    'Are you sure you want to continue? '
                    '(@VALUES@)[@DEFAULT@]: '),
                prompt=True,
                validValues=(_('Yes'), _('No')),
                caseSensitive=False,
                default=_('Yes')) == _('Yes').lower()
    if not self.environment[
            ohostedcons.Upgrade.LM_VOLUMES_UPGRADE_PROCEED]:
        raise otopicontext.Abort('Aborted by user')
    self.logger.info(
        _('Waiting for HA agents on other hosts to be stopped'))
    vmstatus = vm_status.VmStatus()
    ready = False
    # poll until the HA agent is stopped on every other host while still
    # running on this one
    while not ready:
        ready = True
        status = vmstatus.get_status()
        self.logger.debug('hosted-engine-status: {s}'.format(s=status))
        for h in status['all_host_stats']:
            host_id = status['all_host_stats'][h]['host-id']
            hostname = status['all_host_stats'][h]['hostname']
            if 'stopped' in status['all_host_stats'][h]:
                stopped = status['all_host_stats'][h]['stopped']
                if host_id == self.environment[
                        ohostedcons.StorageEnv.HOST_ID]:
                    # the local agent must stay up
                    if stopped:
                        self.logger.warning(
                            _('Please keep ovirt-ha-agent running '
                              'on this host'))
                        ready = False
                else:
                    # every other host's agent must be stopped
                    if not stopped:
                        self.logger.warning(
                            _('ovirt-ha-agent is still active on host {h}, '
                              'please stop it '
                              '(it can require a few seconds).').format(
                                h=hostname))
                        ready = False
            else:
                self.logger.warning(
                    _("Ignoring inconsistent info for host {h}".format(
                        h=hostname,
                    )))
        if not ready:
            time.sleep(2)
    self.environment[ohostedcons.Upgrade.UPGRADE_CREATE_LM_VOLUMES] = True
def _closeup_reprepare_images(self):
    """Run prepareImage again on the hosted-engine images at closeup."""
    self.logger.debug(_("Preparing again HE images"))
    image.Image().prepare_images()