dest = "root@{}:/etc/nova/nova.conf".format(cmpt) res = scp(src, dest) # TODO: Add the NUMATopologyFilter to the default_scheduler_filter list nova_conf = get_nova_conf(host) lines = get_cfg("scheduler_default_filters", nova_conf) lines_ = list(filter(lambda l: l.comment is None, lines)) if not lines_: glob_logger.error("Unable to get") # restart nova pci.openstack_service(cmpt, "restart", "nova") # Setup example creds = read_rc_file(args.server, "/root/keystonerc_admin") # Now, we create a PCI flavor and attempt to boot numa = NUMA(**creds) flv = numa.create_flavor("pci_small", ram=512, vcpus=1) pci_pass_flv = numa.create_pci_flavor(alias_name, flv=flv) glob_logger.info(str(pci_pass_flv.get_keys())) guest = numa.boot_instance(flv=pci_pass_flv, name="pci-testing") instance = numa.discover(guests=[guest])[0] # TODO verify the instance is actually using xmldump = instance.dumpxml() dump = untangle.parse(xmldump)
import unittest
import xml.etree.ElementTree as ET

import untangle

# NUMA, glob_logger, smog, get_credentials, get_computes, get_flavor,
# compute_factory, ensure_nested_support and ArgumentError are assumed to be
# project-local helpers imported elsewhere in this file.


class VCPUPinTest(unittest.TestCase):
    def check_args(self, creds):
        missing = [k for k, v in creds.items() if v is None]
        for m in missing:
            glob_logger.error(
                "Must supply --{} or have value in environment".format(m))
        if missing:
            raise ArgumentError(
                "Arguments not supplied for credentials: {}".format(
                    ", ".join(missing)))

    def __init__(self, args):
        super(VCPUPinTest, self).__init__()
        self.args = args
        creds = get_credentials(args)
        self.creds = creds
        self.check_args(creds)
        self.numa = NUMA(**creds)  # Create a NUMA object
        self.numa.clean()  # make sure we have a clean system
        self.logger = glob_logger

        # 1. First, make sure our system is enabled for vcpu pinning. We
        # assume that our environment is a nested virtual environment with
        # 2 L1 hypervisors running on the L0 baremetal host. First, get the
        # virsh capabilities from the baremetal.
        self.computes = get_computes(args)
        self.nested = False
        if len(self.computes[0]) > 1:
            self.nested = True
            ensure_nested_support(self.computes)

    def create_aggregates(self):
        # 2. Create aggregate server groups: one for pinned=true, the other
        # for pinned=false
        if len(self.computes) > 1:
            meta = "pinned"
            pos_agg, neg_agg, ip = self.numa.create_aggregate_groups(meta)
            extra = {meta: "true"}
        else:
            extra = {}
            pos_agg, neg_agg = None, None
            ip = self.computes[0][0]
        return pos_agg, neg_agg, ip, extra

    def create_pin_flavors(self, name, ram=512, vcpus=2, disk=10, extra=None):
        # 3. Create the pin flavors
        if extra is None:
            extra = {}
        pin_flavor = self.numa.create_flavor(name, ram=ram, vcpus=vcpus,
                                             disksize=disk, specs=extra)
        pin_flavor = self.numa.create_vcpu_pin_flavor(flv=pin_flavor)
        glob_logger.info(str(pin_flavor.get_keys()))
        return pin_flavor

    def boot_pinned_instances(self, pin_flavor):
        pin_instance = self.numa.boot_instance(flv=pin_flavor, name="pin_test")
        active = smog.nova.poll_status(pin_instance, "ACTIVE")
        if not active:
            glob_logger.error("FAIL: The pinned instance could not be created")

    def verify(self):
        # 4. Verify the instance is actually pinned by looking at the domain's
        # <cputune> element. Validation here can get tricky if you combine
        # pinning with a VCPU topology. By default, if you don't have a VCPU
        # topology configured, you should see one <vcpupin> node for each vcpu
        # you requested to have pinned. But if you combine pinning with a vcpu
        # topology, that may not be the case: for example, you can define a
        # topology with multiple cores per socket rather than one core per
        # socket, and threads complicate the count further.
        discovered = self.numa.discover()[0]
        root = ET.fromstring(discovered.dumpxml())
        cputune = next(root.iter("cputune"))
        vcpupins = [child for child in cputune.iter()
                    if child.tag == "vcpupin"]

    def tearDown(self):
        self.numa.clean()

    def test_pin_instance(self):
        pos_agg, neg_agg, ip, extra = self.create_aggregates()
        ram, vcpus, disk = get_flavor(self.args)
        bigger_pin = vcpus
        small_pin = 1
        pin_big_flv = self.create_pin_flavors("pin_big", ram=ram,
                                              vcpus=bigger_pin, disk=disk,
                                              extra=extra)
        pin_small_flv = self.create_pin_flavors("pin_small", ram=512,
                                                vcpus=small_pin, extra=extra)

        # Check how many pcpus we have
        if not self.nested:
            computes = [x[0] for x in self.computes]
        else:
            computes = [x.host for x in compute_factory(self.computes)]

        cpus_left = 0
        for ip in computes:
            conn = smog.virt.get_connection(ip)
            cells = smog.virt.get_cpu_topology(conn)
            info = smog.virt.friendly_topology(cells)
            num_nodes = int(info["num_numa_nodes"])
            # Get all pcpus
            for i in range(num_nodes):
                cpus_left += int(info[i]["cpus"])

        id_ = 0
        while cpus_left > 0:
            test_name = "pintest_" + str(id_)
            if cpus_left >= bigger_pin:
                self.logger.info("Booting instance with pin_flavor_2...")
                inst = self.numa.boot_instance(flv=pin_big_flv, name=test_name)
                cpus_left -= bigger_pin
                vcpus = bigger_pin
            else:
                # Book the remaining pcpus one at a time so that cpus_left
                # always decreases and the loop terminates
                self.logger.info("Booting instance with pin_flavor_1...")
                inst = self.numa.boot_instance(flv=pin_small_flv,
                                               name=test_name)
                cpus_left -= small_pin
                vcpus = small_pin
            id_ += 1

            if inst:
                active = smog.nova.poll_status(inst, "ACTIVE")
                self.assertTrue(active)
                # Now, verify we actually have this pinned and that the
                # instance is on the right host
                discovered = self.numa.discover(guests=[inst])[0]
                xmldump = discovered.dumpxml()
                dump = untangle.parse(xmldump)
                placement = dump.domain.vcpu["placement"]
                txt = str(dump.domain.vcpu.cdata)
                self.assertTrue(placement == "static")
                self.logger.info("<vcpu placement='{}'>".format(placement))
                self.assertTrue(txt == str(vcpus))
                self.logger.info("vcpu cdata = {}".format(txt))
                self.assertTrue(discovered.host.host == ip)
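
# verify() above collects the <vcpupin> elements but stops short of asserting
# anything. A minimal sketch of the check its step-4 comment describes,
# assuming no custom VCPU topology (one <vcpupin> per requested vcpu); the
# helper name and expected_vcpus parameter are hypothetical:
def count_vcpupins(xmldump):
    """Count the <vcpupin> children of <cputune> in a libvirt domain XML."""
    root = ET.fromstring(xmldump)
    cputune = root.find("cputune")
    return 0 if cputune is None else len(cputune.findall("vcpupin"))

# e.g. inside verify():
#     self.assertEqual(count_vcpupins(discovered.dumpxml()), expected_vcpus)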
ram, vcpus, disk = map(int, args.flavor.split(","))
specs = None
if args.key is not None:
    pairs = [item.split("=") for item in args.key]
    specs = {k: v for k, v in pairs}

username = args.username
tenant_name = args.tenant_name
auth_url = args.auth_url
password = args.password
creds = {"username": username, "tenant_name": tenant_name,
         "auth_url": auth_url, "password": password}
missing = [k for k, v in creds.items() if v is None]
for m in missing:
    glob_logger.error("Must supply --{} or have value in environment".format(m))
if missing:
    raise ArgumentError(
        "Arguments not supplied for credentials: {}".format(", ".join(missing)))

numa = NUMA(**creds)  # Create a NUMA object
numa.clean()  # make sure we have a clean system

# Create a new flavor that will have the extra specs we need
numa_flavor = numa.create_flavor("numa_flavor", ram=ram, vcpus=vcpus,
                                 disksize=disk, specs=specs)

# Modify the flavor with the appropriate extra_specs
numa_flavor = numa.create_numa_topo_extra_specs(flv=numa_flavor, numa_nodes=1)

# Now we have a flavor with 1 NUMA node defined. You can display the extra_specs
extra_specs = numa_flavor.get_keys()
glob_logger.info(str(extra_specs))

# Now that we have a flavor with a simple numa topology defined, we can boot
# an instance. Note that the flavor we defined only specified 1 NUMA node and
# a memory policy of preferred. There are many additional permutations, such
# as an asymmetrical topology that spreads vcpus and memory unevenly across
# nodes.
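
# A minimal sketch of the boot described above, reusing the boot_instance and
# poll_status calls seen earlier in this file; the instance name
# "numa-testing" is made up for illustration:
guest = numa.boot_instance(flv=numa_flavor, name="numa-testing")
active = smog.nova.poll_status(guest, "ACTIVE")
if not active:
    glob_logger.error("FAIL: the NUMA instance did not become ACTIVE")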