def _init(self):
    """
    Seed the otopi environment with the storage/VM identifiers read
    from the local hosted-engine configuration.

    Raises RuntimeError if the host is not (fully) configured, instead
    of leaking a raw KeyError/ValueError from the config accessor
    (this implements the former TODO on this block).
    """
    self._config = config.Config(logger=self.logger)
    try:
        # Read every value first, so a partially configured system
        # fails as a whole instead of seeding only some keys.
        defaults = (
            (
                ohostedcons.StorageEnv.DOMAIN_TYPE,
                self._config.get(config.ENGINE, const.DOMAIN_TYPE),
            ),
            (
                ohostedcons.StorageEnv.SD_UUID,
                self._config.get(config.ENGINE, const.SD_UUID),
            ),
            (
                ohostedcons.StorageEnv.SP_UUID,
                self._config.get(config.ENGINE, const.SP_UUID),
            ),
            (
                ohostedcons.StorageEnv.CONF_IMG_UUID,
                self._config.get(config.ENGINE, const.CONF_IMAGE_UUID),
            ),
            (
                ohostedcons.StorageEnv.CONF_VOL_UUID,
                self._config.get(config.ENGINE, const.CONF_VOLUME_UUID),
            ),
            (
                ohostedcons.VMEnv.VM_UUID,
                self._config.get(config.ENGINE, const.HEVMID),
            ),
            (
                ohostedcons.StorageEnv.HOST_ID,
                # HOST_ID is stored as a string; int() may raise
                # ValueError, which is handled below as well.
                int(self._config.get(config.ENGINE, const.HOST_ID)),
            ),
        )
    except (KeyError, ValueError) as ex:
        raise RuntimeError(
            'Hosted Engine is not properly configured on this host: '
            '{error}'.format(error=ex)
        )
    for key, value in defaults:
        self.environment.setdefault(key, value)
    self.environment.setdefault(
        ohostedcons.Upgrade.LM_VOLUMES_UPGRADE_PROCEED,
        None,
    )
def __init__(self):
    """
    Collect the hosted-engine storage configuration from the local
    config file and, where a value is missing, resolve it via VDSM.
    """
    self._log = logging.getLogger("%s.StorageServer" % __name__)
    self._log.addFilter(log_filter.get_intermittent_filter())
    self._config = config.Config(logger=self._log)
    self._cli = util.connect_vdsm_json_rpc(
        logger=self._log,
        timeout=constants.VDSCLI_SSL_TIMEOUT,
    )
    self._type = self._config.get(config.ENGINE, const.DOMAIN_TYPE)
    self._spUUID = self._config.get(config.ENGINE, const.SP_UUID)
    self._sdUUID = self._config.get(config.ENGINE, const.SD_UUID)
    self._storage = self._config.get(config.ENGINE, const.STORAGE)
    self._HEVMID = self._config.get(config.ENGINE, const.HEVMID)
    self._host_id = int(self._config.get(config.ENGINE, const.HOST_ID))
    self._fake_sd_size = '2G'
    self._vfstype = 'ext3'
    self._vm_img_uuid = self._config.get(
        config.ENGINE,
        const.VM_DISK_IMG_ID,
    )
    # The VM disk volume UUID may be absent from older configurations;
    # in that case try to resolve it from VDSM, best effort.
    vm_vol_uuid = None
    try:
        vm_vol_uuid = self._config.get(
            config.ENGINE,
            const.VM_DISK_VOL_ID,
        )
    except (KeyError, ValueError):
        try:
            volumes = self._cli.StorageDomain.getVolumes(
                imageID=self._vm_img_uuid,
                storagepoolID=self._spUUID,
                storagedomainID=self._sdUUID,
            )
            # Guard against an empty volume list: the original code
            # indexed [0] unconditionally and could raise IndexError.
            if volumes:
                vm_vol_uuid = volumes[0]
        except ServerError:
            # Best effort: leave vm_vol_uuid as None on failure.
            pass
    self._vm_vol_uuid = vm_vol_uuid
    self._conf_imgUUID = None
    self._conf_volUUID = None
    self._metadata_imgUUID = self._config.get(
        config.ENGINE,
        const.METADATA_IMAGE_UUID,
    )
    self._metadata_volUUID = self._config.get(
        config.ENGINE,
        const.METADATA_VOLUME_UUID,
    )
    self._lockspace_imgUUID = self._config.get(
        config.ENGINE,
        const.LOCKSPACE_IMAGE_UUID,
    )
    self._lockspace_volUUID = self._config.get(
        config.ENGINE,
        const.LOCKSPACE_VOLUME_UUID,
    )
    self._fake_SD_path = None
    self._fake_file = None
    self._fake_mastersd_uuid = str(uuid.uuid4())
    self._selinux_enabled = selinux.is_selinux_enabled()
    self._fake_master_connection_uuid = str(uuid.uuid4())
def _boot_new_appliance(self):
    """
    Patch a copy of vm.conf so its cdrom device points at the
    cloud-init ISO, then boot the engine VM from the patched config.

    On failure to produce the temporary vm.conf the error is logged
    and the VM is started anyway (original best-effort behavior kept).
    """
    try:
        fd, self._temp_vm_conf = tempfile.mkstemp(
            prefix='appliance',
            suffix='.conf',
        )
        os.close(fd)
        _config = config.Config(logger=self.logger)
        _config.refresh_vm_conf()
        _orig_vm_conf = _config.get(config.ENGINE, const.CONF_FILE)
        # 'with' guarantees the file is closed even if readlines fails
        # (the original leaked the handle on that path).
        with open(_orig_vm_conf) as vm_conf:
            lines = vm_conf.readlines()
        self.logger.debug('Original vm.conf: {l}'.format(l=lines))
        plines = []
        cdrom_attached = False
        for line in lines:
            if 'device:cdrom' in line and 'path:' in line:
                # attaching cloud-init iso to configure the new appliance
                sline = re.sub(
                    r'path:[^,]*,',
                    'path:{iso},'.format(
                        iso=self.environment[ohostedcons.VMEnv.CDROM]
                    ),
                    line
                )
                plines.append(sline)
                cdrom_attached = True
            else:
                plines.append(line)
        if not cdrom_attached:
            # Not an EnvironmentError, so this propagates to the caller
            # (unchanged from the original control flow).
            raise RuntimeError(_(
                'Unable to attach cloud-init ISO image'
            ))
        with open(self._temp_vm_conf, 'w') as vm_conf:
            vm_conf.writelines(plines)
        self.logger.debug('Patched vm.conf: {l}'.format(l=plines))
    except EnvironmentError as ex:
        # str(ex): the former 'ex.message' was Python 2 only and raises
        # AttributeError on Python 3.
        self.logger.error(
            _(
                'Unable to generate the temporary vm.conf file: {msg}'
            ).format(
                msg=str(ex),
            )
        )
    self._createvm()
def set_mode(self, mode):
    """
    Set the HA maintenance mode.

    :param mode: one of 'local', 'global' or 'none'
    :return: True on success, False otherwise (errors go to stderr)
    """
    ha_cli = client.HAClient()
    if mode not in ('local', 'global', 'none'):
        sys.stderr.write(
            _('Invalid maintenance mode: {0}\n').format(mode)
        )
        return False
    m_local = mode == 'local'
    m_global = mode == 'global'
    if m_local:
        # Check that the engine VM is not running here
        vm_id = config.Config().get(config.ENGINE, const.HEVMID)
        cli = ohautil.connect_vdsm_json_rpc()
        try:
            vm_list = cli.Host.getVMList()
        except ServerError as e:
            sys.stderr.write(
                _("Failed communicating with VDSM: {e}").format(e=e)
            )
            return False
        running_ids = [item['vmId'] for item in vm_list]
        if vm_id in running_ids:
            sys.stderr.write(_(
                "Unable to enter local maintenance mode: "
                "the engine VM is running on the current host, "
                "please migrate it before entering local "
                "maintenance mode.\n"
            ))
            return False
    # Apply all three flags; LOCAL and LOCAL_MANUAL follow m_local,
    # GLOBAL follows m_global.
    try:
        for ha_mode, value in (
            (ha_cli.MaintenanceMode.LOCAL, m_local),
            (ha_cli.MaintenanceMode.GLOBAL, m_global),
            (ha_cli.MaintenanceMode.LOCAL_MANUAL, m_local),
        ):
            ha_cli.set_maintenance_mode(mode=ha_mode, value=value)
    except socket.error:
        sys.stderr.write(
            _('Cannot connect to the HA daemon, please check the logs.\n')
        )
        return False
    return True
def __init__(self):
    # Kept as constructor-local imports, matching the original module
    # layout (presumably to avoid an import cycle — TODO confirm).
    from ovirt_hosted_engine_ha.env import config
    from ovirt_hosted_engine_ha.env import config_constants as const
    self._log = logging.getLogger("%s.OVFStore" % __name__)
    self._log.addFilter(log_filter.get_intermittent_filter())
    self._config = config.Config(logger=self._log)
    # Cache the hosted-engine storage identifiers from the local config.
    getcfg = self._config.get
    self._type = getcfg(config.ENGINE, const.DOMAIN_TYPE)
    self._spUUID = getcfg(config.ENGINE, const.SP_UUID)
    self._sdUUID = getcfg(config.ENGINE, const.SD_UUID)
    self._conf_vol_uuid = getcfg(config.ENGINE, const.CONF_VOLUME_UUID)
    self._conf_img_uuid = getcfg(config.ENGINE, const.CONF_IMAGE_UUID)
    self._HEVMID = getcfg(config.ENGINE, const.HEVMID)
def set_mode(self, mode):
    """
    Set the HA maintenance mode.

    :param mode: one of 'local', 'global' or 'none'
    :return: True on success, False otherwise (errors go to stderr)
    """
    ha_cli = client.HAClient()
    if mode not in ('local', 'global', 'none'):
        sys.stderr.write(_('Invalid maintenance mode: {0}\n').format(mode))
        return False
    m_local = mode == 'local'
    m_global = mode == 'global'
    if m_local:
        # Check that we have a host where to migrate VM to.
        _host_id = int(config.Config().get(config.ENGINE, const.HOST_ID))
        all_stats = ha_cli.get_all_host_stats()
        candidates = [
            host
            for host, stats in all_stats.items()
            if stats["score"] > 0
            and stats["host-id"] != _host_id
            and stats["live-data"]
        ]
        if not candidates:
            sys.stderr.write(
                _("Unable to enter local maintenance mode: "
                  "there are no available hosts capable "
                  "of running the engine VM.\n"))
            return False
    # Apply all three flags; LOCAL and LOCAL_MANUAL follow m_local,
    # GLOBAL follows m_global.
    try:
        for ha_mode, value in (
            (ha_cli.MaintenanceMode.LOCAL, m_local),
            (ha_cli.MaintenanceMode.GLOBAL, m_global),
            (ha_cli.MaintenanceMode.LOCAL_MANUAL, m_local),
        ):
            ha_cli.set_maintenance_mode(mode=ha_mode, value=value)
    except socket.error:
        sys.stderr.write(
            _('Cannot connect to the HA daemon, please check the logs.\n'))
        return False
    return True
def is_conf_file_uptodate(conf=None):
    """
    Return True when the local hosted-engine configuration looks
    up to date: the conf volume/image UUIDs are present and the
    storage pool UUID has been blanked out.

    :param conf: optional pre-built config object; a fresh
                 config.Config is created when None
    """
    if conf is None:
        conf = config.Config(logger)
    # These lookups raise KeyError/ValueError when the configuration
    # is incomplete; that maps to "not up to date".
    try:
        volume = conf.get(config.ENGINE, const.CONF_VOLUME_UUID)
        logger.debug('Conf volume: %s ', volume)
        image = conf.get(config.ENGINE, const.CONF_IMAGE_UUID)
        logger.debug('Conf image: %s ', image)
        spuuid = conf.get(config.ENGINE, const.SP_UUID)
    except (KeyError, ValueError):
        return False
    if spuuid != constants.BLANK_UUID:
        logger.debug("Storage domain UUID is not blank")
        return False
    return True
def _init(self):
    self._config = config.Config(logger=self.logger)
    # TODO: catch error if not configured and properly raise
    # Seed the otopi environment with the identifiers stored in the
    # hosted-engine configuration, without overriding values the
    # environment already carries.
    for env_key, cfg_key in (
        (ohostedcons.StorageEnv.DOMAIN_TYPE, config.DOMAIN_TYPE),
        (ohostedcons.StorageEnv.SD_UUID, config.SD_UUID),
        (ohostedcons.StorageEnv.CONF_IMG_UUID, config.CONF_IMAGE_UUID),
        (ohostedcons.StorageEnv.CONF_VOL_UUID, config.CONF_VOLUME_UUID),
        (ohostedcons.VMEnv.VM_UUID, config.HEVMID),
    ):
        self.environment.setdefault(
            env_key,
            self._config.get(config.ENGINE, cfg_key),
        )