def _check_relative_controller_multi_fs(controller_fs_new_list):
    """
    Verify the relative controller_fs sizes across a list of filesystems.

    Ensures the backup filesystem is large enough to hold the database and
    glance (cgcs) filesystems plus a fixed overhead.

    :param controller_fs_new_list: list of controller filesystem objects,
        each exposing ``name`` and ``size`` (GiB) attributes.
    :return: None. Raise Client exception on failure.
    """
    # Virtual environments are exempt from the backup sizing constraint.
    if cutils.is_virtual():
        return

    # Initialize all sizes to 0 so a filesystem missing from the input list
    # cannot leave these names unbound when the comparison/message below
    # references them.
    backup_gib = 0
    database_gib = 0
    cgcs_gib = 0

    backup_gib_min = constants.BACKUP_OVERHEAD
    for fs in controller_fs_new_list:
        if fs.name == constants.FILESYSTEM_NAME_DATABASE:
            database_gib = fs.size
            backup_gib_min += fs.size
        elif fs.name == constants.FILESYSTEM_NAME_CGCS:
            cgcs_gib = fs.size
            backup_gib_min += fs.size
        elif fs.name == constants.FILESYSTEM_NAME_BACKUP:
            backup_gib = fs.size

    # No backup filesystem present yet: nothing to validate against
    # (mirrors _check_relative_controller_fs).
    if backup_gib == 0:
        LOG.info("_check_relative_controller_multi_fs backup filesystem "
                 "not yet setup")
        return

    if backup_gib < backup_gib_min:
        raise wsme.exc.ClientSideError(_("backup size of %d is "
                                         "insufficient. "
                                         "Minimum backup size of %d is "
                                         "required based upon glance size %d "
                                         "and database size %d. "
                                         "Rejecting modification "
                                         "request." %
                                         (backup_gib,
                                          backup_gib_min,
                                          cgcs_gib,
                                          database_gib
                                          )))
def _get_virtual_config(self, host):
    """Return hiera overrides that apply only to virtual environments.

    :param host: host object checked for the virtual-worker personality.
    :returns: dict of overrides; empty when the host is not virtual.
    """
    # Guard clause: physical hosts receive no virtual-specific overrides.
    if not (utils.is_virtual() or utils.is_virtual_worker(host)):
        return {}
    return {
        'platform::vswitch::params::iommu_enabled': False,
        'platform::vswitch::params::hugepage_dir': '/mnt/huge-2048kB',
        'openstack::neutron::params::tunnel_csum': True,
    }
def update_dynamic_options(self, overrides):
    """Mutate *overrides* in place for virtual environments.

    Disables vhost-user in the ml2 OVS driver configuration when running
    virtualized; otherwise leaves *overrides* untouched.
    """
    # Early exit keeps the happy path flat.
    if not utils.is_virtual():
        return
    overrides.update({
        'plugins': {
            'ml2_conf': {
                'ovs_driver': {
                    'vhost_user_enabled': False
                }
            }
        }
    })
def _check_relative_controller_fs(controller_fs_new, controller_fs_list):
    """
    This function verifies the relative controller_fs sizes.

    :param controller_fs_new: proposed filesystem change (dict with 'name'
        and 'size'), or None/empty to validate the existing list as-is.
    :param controller_fs_list: current list of controller filesystem dicts.
    :return: None. Raise Client exception on failure.
    """
    # Virtual environments are exempt from the backup sizing constraint.
    if cutils.is_virtual():
        return

    backup_gib = 0
    database_gib = 0
    cgcs_gib = 0

    for fs in controller_fs_list:
        # Overlay the pending size onto the entry being modified so the
        # check evaluates the post-modification state.
        if controller_fs_new and fs['name'] == controller_fs_new['name']:
            fs['size'] = controller_fs_new['size']
        if fs['name'] == "backup":
            backup_gib = fs['size']
        elif fs['name'] == constants.DRBD_CGCS:
            cgcs_gib = fs['size']
        elif fs['name'] == "database":
            database_gib = fs['size']

    if backup_gib == 0:
        LOG.info(
            "_check_relative_controller_fs backup filesystem not yet setup")
        return

    # Required minimum backup filesystem size
    backup_gib_min = cgcs_gib + database_gib + constants.BACKUP_OVERHEAD

    if backup_gib < backup_gib_min:
        # NOTE: message wording fixed from "based on upon" to "based upon".
        raise wsme.exc.ClientSideError(_("backup size of %d is "
                                         "insufficient. "
                                         "Minimum backup size of %d is "
                                         "required based upon "
                                         "glance=%d and database=%d and "
                                         "backup overhead of %d. "
                                         "Rejecting modification "
                                         "request." %
                                         (backup_gib,
                                          backup_gib_min,
                                          cgcs_gib,
                                          database_gib,
                                          constants.BACKUP_OVERHEAD
                                          )))
def _check_memory(dbapi, rpc_port, ihost,
                  platform_reserved_mib=None,
                  vm_hugepages_nr_2M_pending=None,
                  vm_hugepages_nr_1G_pending=None,
                  vswitch_hugepages_reqd=None,
                  vswitch_hugepages_size_mib=None,
                  vm_pending_as_percentage=None):
    """Validate proposed memory settings for one NUMA node of a host.

    Checks that the requested platform-reserved memory is a non-negative
    number, is at least the platform minimum for the node, does not exceed
    2/3 of the node's total memory, and leaves enough memory for the
    requested VM and vswitch hugepages (within a small tolerance).

    :param dbapi: database API handle passed through to
        cutils.get_minimum_platform_reserved_memory.
    :param rpc_port: dict-like memory record; keys read here include
        'forinodeid', 'node_memtotal_mib', 'memtotal_mib',
        'vm_pending_as_percentage', 'vswitch_hugepages_size_mib',
        'vswitch_hugepages_nr', and the vm_hugepages_* keys.
    :param ihost: host object used for personality/virtual checks.
    :param platform_reserved_mib: proposed reserved memory (MiB); all
        validation is skipped when falsy.
    :param vm_pending_as_percentage: when the string "True", the pending
        2M/1G values are interpreted as percentages of available hugepage
        memory rather than page counts.
    :return: None. Raises wsme.exc.ClientSideError on validation failure.
    """
    if platform_reserved_mib:
        # Check for invalid characters
        try:
            val = int(platform_reserved_mib)
        except ValueError:
            raise wsme.exc.ClientSideError((
                "Platform memory must be a number"))
        if val < 0:
            raise wsme.exc.ClientSideError((
                "Platform memory must be greater than zero"))

        # translating vswitch nones to zeros
        vswitch_hugepages_reqd = (0 if vswitch_hugepages_reqd is None
                                  else vswitch_hugepages_reqd)

        # Check for lower limit
        inode_id = rpc_port['forinodeid']
        inode = pecan.request.dbapi.inode_get(inode_id)
        min_platform_memory = cutils.get_minimum_platform_reserved_memory(
            dbapi, ihost, inode.numa_node)
        if int(platform_reserved_mib) < min_platform_memory:
            raise wsme.exc.ClientSideError(
                _("Platform reserved memory for numa node %s must be greater than the minimum value %d"
                  ) % (inode.numa_node, min_platform_memory))

        # Check if it is within 2/3 percent of the total memory.
        # Worker hosts report per-node totals; other personalities report
        # a single memtotal_mib.
        if cutils.host_has_function(ihost, constants.WORKER):
            node_memtotal_mib = rpc_port['node_memtotal_mib']
        else:
            node_memtotal_mib = rpc_port['memtotal_mib']
        max_platform_reserved = node_memtotal_mib * 2 / 3
        if int(platform_reserved_mib) > max_platform_reserved:
            low_core = cutils.is_low_core_system(ihost, pecan.request.dbapi)
            required_platform_reserved = \
                cutils.get_required_platform_reserved_memory(
                    pecan.request.dbapi, ihost, inode.numa_node, low_core)
            msg_platform_over = (
                _("Platform reserved memory %s MiB "
                  "on node %s is not within range [%s, %s]")
                % (int(platform_reserved_mib),
                   inode.numa_node,
                   required_platform_reserved,
                   max_platform_reserved))
            # Over-reservation is only a warning on virtual systems,
            # a hard failure otherwise.
            if cutils.is_virtual() or cutils.is_virtual_worker(ihost):
                LOG.warn(msg_platform_over)
            else:
                raise wsme.exc.ClientSideError(msg_platform_over)

        # Fall back to the stored percentage-mode flag when not supplied.
        if not vm_pending_as_percentage:
            vm_pending_as_percentage = rpc_port["vm_pending_as_percentage"]

        # Resolve vswitch hugepage size (MiB) and count, defaulting to 0.
        if vswitch_hugepages_size_mib:
            vs_hp_size = int(vswitch_hugepages_size_mib)
        else:
            vs_hp_size = rpc_port['vswitch_hugepages_size_mib']
            if vs_hp_size is None:
                vs_hp_size = 0
        vs_hp_nr = 0
        if vswitch_hugepages_reqd:
            vs_hp_nr = int(vswitch_hugepages_reqd)
        elif rpc_port['vswitch_hugepages_nr']:
            vs_hp_nr = int(rpc_port['vswitch_hugepages_nr'])

        # Memory left for VM hugepages after platform and vswitch claims.
        hp_mem_avail = node_memtotal_mib - int(platform_reserved_mib) \
            - int(vs_hp_nr * vs_hp_size)

        # Check if it is within the total amount of memory
        mem_alloc = 0
        if vm_pending_as_percentage == "True":
            # Percentage mode: pending values (or stored percentages) are
            # percentages of hp_mem_avail, not page counts.
            if vm_hugepages_nr_2M_pending is not None:
                mem_alloc += int(hp_mem_avail *
                                 int(vm_hugepages_nr_2M_pending) / 100)
            elif rpc_port['vm_hugepages_2M_percentage'] is not None:
                mem_alloc += int(hp_mem_avail *
                                 int(rpc_port['vm_hugepages_2M_percentage']) / 100)
            if vm_hugepages_nr_1G_pending is not None:
                mem_alloc += int(hp_mem_avail *
                                 int(vm_hugepages_nr_1G_pending) / 100)
            elif rpc_port['vm_hugepages_1G_percentage'] is not None:
                mem_alloc += int(hp_mem_avail *
                                 int(rpc_port['vm_hugepages_1G_percentage']) / 100)
        else:
            # Page-count mode: convert page counts to MiB.
            if vm_hugepages_nr_2M_pending is not None:
                mem_alloc += int(vm_hugepages_nr_2M_pending) * constants.MIB_2M
            elif rpc_port['vm_hugepages_nr_2M'] is not None:
                mem_alloc += int(
                    rpc_port['vm_hugepages_nr_2M']) * constants.MIB_2M
            if vm_hugepages_nr_1G_pending is not None:
                mem_alloc += int(vm_hugepages_nr_1G_pending) * constants.MIB_1G
            elif rpc_port['vm_hugepages_nr_1G'] is not None:
                mem_alloc += int(
                    rpc_port['vm_hugepages_nr_1G']) * constants.MIB_1G
        mem_alloc += vs_hp_size * vs_hp_nr

        # Initial configuration defaults mem_alloc to consume 100% of 2M pages,
        # so we may marginally exceed available non-huge memory.
        # Note there will be some variability in total available memory,
        # so we need to allow some tolerance so we do not hit the limit.
        avail = node_memtotal_mib - mem_alloc
        delta = int(platform_reserved_mib) - avail
        mem_thresh = 32  # MiB of tolerance before rejecting
        if int(platform_reserved_mib) > avail + mem_thresh:
            msg = (
                _("Platform reserved memory %s MiB exceeds %s MiB available "
                  "by %s MiB (2M: %s pages; 1G: %s pages). "
                  "total memory=%s MiB, allocated=%s MiB.")
                % (platform_reserved_mib, avail, delta,
                   delta / 2, delta / 1024,
                   node_memtotal_mib, mem_alloc))
            raise wsme.exc.ClientSideError(msg)
        else:
            msg = (
                _("Platform reserved memory %s MiB, %s MiB available, "
                  "total memory=%s MiB, allocated=%s MiB.")
                % (platform_reserved_mib, avail,
                   node_memtotal_mib, mem_alloc))
            LOG.info(msg)
def _check_memory(rpc_port, ihost,
                  platform_reserved_mib=None,
                  vm_hugepages_nr_2M_pending=None,
                  vm_hugepages_nr_1G_pending=None,
                  vswitch_hugepages_reqd=None,
                  vswitch_hugepages_size_mib=None):
    """Validate proposed memory settings for one NUMA node of a host.

    Checks that the requested platform-reserved memory is at least the
    platform minimum for the node, does not exceed 2/3 of total node
    memory, and leaves enough memory for the requested VM and vswitch
    hugepages (within a small tolerance).

    :param rpc_port: dict-like memory record; keys read here include
        'forinodeid', 'node_memtotal_mib', 'vm_hugepages_nr_2M',
        'vm_hugepages_nr_1G', 'vswitch_hugepages_size_mib' and
        'vswitch_hugepages_nr'.
    :param ihost: host object used for low-core/virtual checks.
    :param platform_reserved_mib: proposed reserved memory (MiB); all
        validation is skipped when falsy.
    :return: None. Raises wsme.exc.ClientSideError on validation failure.
    """
    if platform_reserved_mib:
        # Check for lower limit
        inode_id = rpc_port['forinodeid']
        inode = pecan.request.dbapi.inode_get(inode_id)
        min_platform_memory = cutils.get_minimum_platform_reserved_memory(
            ihost, inode.numa_node)
        if int(platform_reserved_mib) < min_platform_memory:
            raise wsme.exc.ClientSideError(
                _("Platform reserved memory for numa node %s must be greater than the minimum value %d"
                  ) % (inode.numa_node, min_platform_memory))

        # Check if it is within 2/3 percent of the total memory
        node_memtotal_mib = rpc_port['node_memtotal_mib']
        max_platform_reserved = node_memtotal_mib * 2 / 3
        if int(platform_reserved_mib) > max_platform_reserved:
            low_core = cutils.is_low_core_system(ihost, pecan.request.dbapi)
            required_platform_reserved = \
                cutils.get_required_platform_reserved_memory(
                    ihost, inode.numa_node, low_core)
            msg_platform_over = (
                _("Platform reserved memory %s MiB "
                  "on node %s is not within range [%s, %s]")
                % (int(platform_reserved_mib),
                   inode.numa_node,
                   required_platform_reserved,
                   max_platform_reserved))
            # Over-reservation is only a warning on virtual systems,
            # a hard failure otherwise.
            if cutils.is_virtual() or cutils.is_virtual_worker(ihost):
                LOG.warn(msg_platform_over)
            else:
                raise wsme.exc.ClientSideError(msg_platform_over)

        # Check if it is within the total amount of memory
        mem_alloc = 0
        if vm_hugepages_nr_2M_pending:
            mem_alloc += int(vm_hugepages_nr_2M_pending) * constants.MIB_2M
        elif rpc_port['vm_hugepages_nr_2M']:
            mem_alloc += int(rpc_port['vm_hugepages_nr_2M']) * constants.MIB_2M
        if vm_hugepages_nr_1G_pending:
            mem_alloc += int(vm_hugepages_nr_1G_pending) * constants.MIB_1G
        elif rpc_port['vm_hugepages_nr_1G']:
            mem_alloc += int(rpc_port['vm_hugepages_nr_1G']) * constants.MIB_1G
        LOG.debug("vm total=%s" % (mem_alloc))

        vs_hp_nr = 0
        if vswitch_hugepages_size_mib:
            vs_hp_size = int(vswitch_hugepages_size_mib)
        else:
            vs_hp_size = rpc_port['vswitch_hugepages_size_mib']
            # Guard against a NULL database value: without this,
            # vs_hp_size * vs_hp_nr below raises TypeError (the newer
            # dbapi variant of _check_memory has the same guard).
            if vs_hp_size is None:
                vs_hp_size = 0
        if vswitch_hugepages_reqd:
            vs_hp_nr = int(vswitch_hugepages_reqd)
        elif rpc_port['vswitch_hugepages_nr']:
            vs_hp_nr = int(rpc_port['vswitch_hugepages_nr'])
        mem_alloc += vs_hp_size * vs_hp_nr

        LOG.debug("vs_hp_nr=%s vs_hp_size=%s" % (vs_hp_nr, vs_hp_size))
        LOG.debug("memTotal %s mem_alloc %s" % (node_memtotal_mib, mem_alloc))

        # Initial configuration defaults mem_alloc to consume 100% of 2M pages,
        # so we may marginally exceed available non-huge memory.
        # Note there will be some variability in total available memory,
        # so we need to allow some tolerance so we do not hit the limit.
        avail = node_memtotal_mib - mem_alloc
        delta = int(platform_reserved_mib) - avail
        mem_thresh = 32  # MiB of tolerance before rejecting
        if int(platform_reserved_mib) > avail + mem_thresh:
            msg = (
                _("Platform reserved memory %s MiB exceeds %s MiB available "
                  "by %s MiB (2M: %s pages; 1G: %s pages). "
                  "total memory=%s MiB, allocated=%s MiB.")
                % (platform_reserved_mib, avail, delta,
                   delta / 2, delta / 1024,
                   node_memtotal_mib, mem_alloc))
            raise wsme.exc.ClientSideError(msg)
        else:
            msg = (
                _("Platform reserved memory %s MiB, %s MiB available, "
                  "total memory=%s MiB, allocated=%s MiB.")
                % (platform_reserved_mib, avail,
                   node_memtotal_mib, mem_alloc))
            LOG.info(msg)
def _get_manifests_overrides(self):
    """Return manifest overrides; disables the IPMI daemonset on virtual.

    :returns: dict of overrides; empty on non-virtual systems.
    """
    overrides = {}
    if utils.is_virtual():
        # Virtual hardware has no IPMI controller to monitor.
        overrides['daemonset_ipmi'] = False
    return overrides
def _get_virt_type(self):
    """Return 'qemu' when running in a virtual environment, else 'kvm'."""
    # Nested virtualization lacks hardware acceleration, so fall back
    # to plain qemu there.
    return 'qemu' if utils.is_virtual() else 'kvm'