def __init__(self, args):
        """Set up the vCPU-pinning test harness.

        Resolves credentials from *args*, validates them, prepares a clean
        NUMA environment, and detects whether we are running on nested
        (L1-on-L0) hypervisors.
        """
        super(VCPUPinTest, self).__init__()
        self.args = args
        # Resolve and validate the OpenStack credentials before any API use.
        self.creds = get_credentials(args)
        self.check_args(self.creds)
        # Build the NUMA helper and wipe any leftover state so each run
        # starts from a known-clean system.
        self.numa = NUMA(**self.creds)
        self.numa.clean()
        self.logger = glob_logger

        # 1. First, make sure our system is enabled for vcpu pinning.  We assume that our environment
        #    is a nested virtual environment with 2 L1 hypervisors running on the L0 baremetal host.
        #    First, get the virsh capabilities from the baremetal.
        self.computes = get_computes(args)
        # More than one entry per compute record indicates nesting — confirm
        # the hypervisors actually support it before continuing.
        self.nested = len(self.computes[0]) > 1
        if self.nested:
            ensure_nested_support(self.computes)
# --- Example 2 ---
# Assemble the credential set from the parsed CLI arguments / environment.
tenant_name = args.tenant_name
auth_url = args.auth_url
password = args.password
creds = {
    "username": username,
    "tenant_name": tenant_name,
    "auth_url": auth_url,
    "password": password
}
# Collect every credential that was neither passed on the command line nor
# found in the environment.
missing = [k for k, v in creds.items() if v is None]
# BUG FIX: the original raised inside the loop, so only the FIRST missing
# credential was ever reported.  Log every missing one, then raise once.
for m in missing:
    glob_logger.error(
        "Must supply --{} or have value in environment".format(m))
if missing:
    raise ArgumentError("Argument {} not supplied for credentials".format(missing[0]))

# Build the NUMA helper from the validated credentials and reset the system
# so the example starts from a clean slate.
numa = NUMA(**creds)  # Create a NUMA object
numa.clean()  # make sure we have a clean system

# Create a new flavor that will have the extra specs we need
# NOTE(review): ram/vcpus/disk are presumably set earlier in the original
# script — they are not defined in this fragment.
numa_flavor = numa.create_flavor("numa_flavor",
                                 ram=ram,
                                 vcpus=vcpus,
                                 disksize=disk,
                                 specs=None)

# Modify the flavor with the appropriate extra_specs
# (numa_nodes=1 here, despite the comment below mentioning 2 — verify intent)
numa_flavor = numa.create_numa_topo_extra_specs(flv=numa_flavor, numa_nodes=1)

# Now we have a flavor with 2 NUMA nodes defined.  You can display the extra_specs
extra_specs = numa_flavor.get_keys()
glob_logger.info(str(extra_specs))
# --- Example 3 ---
    # NOTE(review): this is the tail of a function whose definition starts
    # outside this fragment; `cmpt`, `src`, `host`, and `pci` come from the
    # missing part.  Copy the local nova.conf to the compute node as root.
    dest = "root@{}:/etc/nova/nova.conf".format(cmpt)
    res = scp(src, dest)

    # TODO: Add the NUMATopologyFilter to the default_scheduler_filter list
    nova_conf = get_nova_conf(host)
    lines = get_cfg("scheduler_default_filters", nova_conf)
    # Keep only the uncommented occurrences of the setting.
    lines_ = list(filter(lambda l: l.comment is None, lines))
    if not lines_:
        # NOTE(review): truncated log message — probably meant something like
        # "Unable to get scheduler_default_filters"; also nothing is done on
        # this failure path before the restart below.
        glob_logger.error("Unable to get")

    # restart nova
    pci.openstack_service(cmpt, "restart", "nova")


# Setup example
creds = read_rc_file(args.server, "/root/keystonerc_admin")

# Now, we create a PCI flavor and attempt to boot
numa = NUMA(**creds)
flv = numa.create_flavor("pci_small", ram=512, vcpus=1)
pci_pass_flv = numa.create_pci_flavor(alias_name, flv=flv)
glob_logger.info(str(pci_pass_flv.get_keys()))

guest = numa.boot_instance(flv=pci_pass_flv, name="pci-testing")
instance = numa.discover(guests=[guest])[0]

# TODO verify the instance is actually using
xmldump = instance.dumpxml()
dump = untangle.parse(xmldump)