def test_and_set_nested(host, timeout=600):
    """
    Ensures the kvm_intel module on ``host`` has nested virtualization
    enabled, rebooting the machine once if it does not.

    :param host: IP address of the machine to check
    :param timeout: seconds allowed for the reboot/ping cycle
    :return: ProcessResult from reading the nested parameter
    :raises sce.ConfigException: if nested support is still off after reboot
    """
    nested_path = "/sys/module/kvm_intel/parameters/nested"
    res = Command("cat {}".format(nested_path), host=host)(showout=False)
    if res.output.strip() == "Y":
        return res

    # Nested support is off: reboot the masters machine so the module
    # parameter takes effect, then wait for the box to come back.
    glob_logger.info("rebooting {} to set nested param".format(host))
    rebooter(host, timeout=timeout)
    time.sleep(45)  # Fudge factor here...
    pinger(host, timeout=timeout)

    # After reboot, make sure nested support is there
    try:
        res = Command("cat {}".format(nested_path), host=host)(showout=False)
        if res.output.strip() != "Y":
            glob_logger.error("{}={}".format(nested_path, res.output.strip()))
            raise sce.ConfigException("Nested support still not enabled")
    except CommandException:
        raise sce.ConfigException("Nested support still not enabled")
    return res
def freader(fobj, monitor=None, save=None, showout=True, proc=None):
    """
    Small function which can be thrown into a thread to read a long
    running subprocess.

    :param fobj: a file like object that will be read from
    :param monitor: a Queue object; putting the string "stop" on it ends
        the reader loop (None disables external control)
    :param save: a Queue-like object with a put() method; each non-empty
        line read is put here
    :param showout: if True, log each non-empty line at info level
    :param proc: optional Popen-like object; the loop also ends once
        proc.poll() reports the process has exited
    """
    while not fobj.closed:
        # Stop when the watched process (if any) has terminated
        if proc is not None:
            if proc.poll() is not None:
                break
        try:
            item = monitor.get_nowait()
            if item == "stop":
                break
        except queue.Empty:
            pass
        except AttributeError:
            # monitor is None; no external control queue was supplied
            pass
        line = fobj.readline()  # blocks when nothing in fobj buffer
        if line and showout:
            LOGGER.info(line.strip())
        if line and save is not None:
            # BUG FIX: only queue real lines -- previously empty reads at
            # EOF were queued as well, flooding `save` with "" entries
            save.put(line)
def pip_cmd(self, pkgnames, cmd="install", pip_args=None):
    """
    Runs a pip command (install by default) for one or more packages.

    :param pkgnames: a package name (str) or a sequence of package names
    :param cmd: the pip subcommand to run (e.g. "install")
    :param pip_args: optional list of extra arguments handed to pip
    :return: None
    :raises ImportError: if the pip module cannot be imported
    """
    # Avoid the mutable-default-argument trap
    if pip_args is None:
        pip_args = []

    pip_args.append(cmd)
    if isinstance(pkgnames, str):
        pip_args.append(pkgnames)
    else:
        # concatenate the package names onto the arg list
        pip_args += [pkg for pkg in pkgnames]

    msg = "Running pip " + " ".join(pip_args)
    glob_logger.info(msg)

    # FIX: the original imported pip twice (plus an unused
    # setuptools.find_packages) with two divergent ImportError handlers;
    # import once and fail loudly.
    try:
        import pip
    except ImportError as ie:
        self.logger.error("Unable to import pip")
        raise ie
    # NOTE(review): pip.main(initial_args=...) is a legacy interface that
    # modern pip versions removed -- confirm the pinned pip version.
    pip.main(initial_args=pip_args)
def test_instance_create_multi(nova_auth):
    """
    Boots ten test instances in a row via the nova client.

    :param nova_auth: authenticated nova client fixture
    """
    glob_logger.info("Nova multi instance create.")
    # BUG FIX: the original used for/else with `assert 0` in the else
    # clause; since the loop never breaks, the else always ran and the
    # test failed unconditionally.
    for node in range(10):
        nova_auth.boot_instance('iridium_test_%s' % node,
                                '6348bd72-602f-41ce-99fa-49c6789bbeb3',
                                '1')
    glob_logger.info("Complete Nove multi create.")
def save_history():
    """
    Saves the session history to specified file in config module.

    :return: None
    """
    # TODO figure out why this is not appending log details.
    glob_logger.info("Saving History...")
    fname = make_timestamped_filename('iridium_cli_history')
    # NOTE(review): plain concatenation assumes log_dir ends with a path
    # separator -- confirm, or switch to os.path.join
    readline.write_history_file(config.logging['log_dir'] + fname)
def save_history(): """ Saves the session history to specified file in config module. :return: None """ # TODO figure out why this is not appending log details. glob_logger.info("Saving History...") log_path = config.logging['log_dir'] + make_timestamped_filename( 'iridium_cli_history') readline.write_history_file(log_path)
def _poll_for_status(self, instance, status, poll_interval=2, timeout=300,
                     log=False):
    """
    Polls for the status of a nova instance

    :param instance: The nova instance object to poll
    :param status: What status to check for.  If "deleted", polls until
        the instance has been deleted
    :param poll_interval: seconds to sleep between polls
    :param timeout: overall deadline in seconds (None means poll forever)
    :param log: if True, log each unsuccessful poll
    :return: True if the desired status was reached, False on timeout
    """
    start_time = time.time()

    def timer():
        # BUG FIX: the original computed start_time + timeout *before*
        # checking for None, so timeout=None raised TypeError.
        if timeout is None:
            return True
        return (start_time + timeout) > time.time()

    achieved = False
    while timer():
        try:
            instance.get()
        except NotFound as nf:
            # A 404 means the instance is gone; that is success only when
            # we were waiting for deletion
            if status == "deleted":
                achieved = True
                break
            else:
                raise nf
        except AttributeError as ae:
            if status == "deleted":
                achieved = True
                break
            else:
                raise ae
        else:
            if instance.status == "ERROR":
                # An errored instance only counts as success if ERROR is
                # the status we were actually waiting for
                if status == "ERROR":
                    achieved = True
                else:
                    glob_logger.error("Failed to boot instance")
                break
            if instance.status != status:
                if log:
                    msg = "Checking for {} on {}: status is {}"
                    msg = msg.format(status, instance.name, instance.status)
                    glob_logger.info(msg)
                time.sleep(poll_interval)
            else:
                achieved = True
                break
    return achieved
def check_kvm_file(host):
    """
    Checks of the /dev/kvm special file exists on host

    :param host: (str) IP address of machine
    :return: ProcessResult object (or throws ConfigException)
    """
    glob_logger.info("Checking /dev/kvm")
    result = Command("file /dev/kvm", host=host)()
    # file(1) reports "cannot open" when the special device is missing
    if "cannot open" in result.output:
        raise sce.ConfigException("no /dev/kvm on {}".format(host))
    return result
def set_host_model(hyper_ip, dom_name, user="******"):
    """
    Can be used as fn arg to set_nested_vm_support

    :param hyper_ip: the IP address of hypervisor machine
    :param dom_name: the libvirt domain name
    :param user: user to connect to libvirt as
    :return: ProcessResult of executing virt-xml command
    """
    glob_logger.info("Setting host_model mode for {}".format(dom_name))
    # BUG FIX: the second string literal sat on its own line with no
    # continuation, so "host-model-only,+vmx" was a dead statement and the
    # virt-xml command line was missing its cpu-model argument.
    cmd = ("virt-xml --connect=qemu+ssh://{}@{}/system {} --edit --cpu "
           "host-model-only,+vmx")
    cmd = cmd.format(user, hyper_ip, dom_name)
    return Command(cmd)()
def set_host_model(hyper_ip, dom_name, user="******"):
    """
    Can be used as fn arg to set_nested_vm_support

    :param hyper_ip: the IP address of hypervisor machine
    :param dom_name: the libvirt domain name
    :param user: user to connect to libvirt as
    :return: ProcessResult of executing virt-xml command
    """
    glob_logger.info("Setting host_model mode for {}".format(dom_name))
    # Edit the domain xml over a qemu+ssh connection to set the cpu model
    template = ("virt-xml --connect=qemu+ssh://{}@{}/system {} --edit --cpu "
                "host-model-only,+vmx")
    return Command(template.format(user, hyper_ip, dom_name))()
def set_host_passthrough(hyper_ip, dom_name, user="******"):
    """
    Sets a domain's <cpu> element to use mode host-passthrough

    :param hyper_ip: (str) IP address of host with hypervisor
    :param dom_name: (str) the libvirt domain name
    :param user: (str) user to connect to libvirt hypervisor
    :return: ProcessResult
    """
    # FIXME: How do we do this just using libvirt?  This adds a dependency
    # on virt-xml
    # Edit the domain's xml to use host-passthrough mode
    glob_logger.info("Setting host-passthrough mode for {}".format(dom_name))
    command_line = ("virt-xml --connect=qemu+ssh://{}@{}/system {} --edit "
                    "--cpu host-passthrough,clearxml=yes"
                    .format(user, hyper_ip, dom_name))
    return Command(command_line)()
def turn_on(hv_ip, domain_name, ip_addr, wait=5):
    """
    Powers on a VM given the hypervisor IP address and the VM's domain name

    :param hv_ip: (str) IP address of the hypervisor host
    :param domain_name: (str) libvirt domain name of the VM
    :param ip_addr: (str) IP address of the VM, used to ping it back up
    :param wait: seconds to pause after the VM answers pings
    :return: the libvirt domain object
    """
    domain = get_domain(hv_ip, domain_name)
    if domain.state()[0] not in [1]:
        # Start the L1 guest hypervisor and wait until it is reachable
        glob_logger.info("Bringing back up L1 hypervisor {}".format(ip_addr))
        power_on(domain)
        time.sleep(1)
        pinger(ip_addr)
        time.sleep(wait)  # Give a few seconds for services to come up
    return domain
def keystone_retrieve(version: str = 'v2', read_export: bool = False,
                      **kwargs: dict) -> dict:
    """
    Simple function to retrieve configuration information.  If no kwargs
    are passed in and read_export is True, the necessary information is
    retrieved from the environment (ie, as when you source
    keystonerc_admin); otherwise the built-in config file is used.

    :type read_export: bool
    :param version: sets the version of ReST protocol to implement.
        (ie. "/v2.0", "/v3")
    kwargs:
        auth_url location to contact the keystone server.
        username username to authenticate against keystone server.
        password password for username.
        project_name (version 3) or tenant_name (version 2) project
            credential for user.
        user_domain_name domain for username, only valid for version 3
            protocol.
        project_domain_name domain for specified project, only valid for
            version 3.
    :rtype : dict
    :return: A dictionary that can be used with keystone client.
    """
    coll = CFG.dump_config(IRIDIUM_CONF)
    creds_coll = CFG.lookup(config_dict=coll, search_key=version)
    if not kwargs and read_export:
        # Credentials exported into the shell environment
        glob_logger.info("Reading Environmental variables..")
        creds = {
            "username": os.environ.get("OS_USERNAME"),
            "password": os.environ.get("OS_PASSWORD"),
            "auth_url": os.environ.get("OS_AUTH_URL"),
            "tenant_name": os.environ.get("OS_TENANT_NAME")
        }
    # BUG FIX: this was a bare `if not kwargs:` which unconditionally
    # overwrote the environment-derived creds built just above.
    elif not kwargs:
        # Here we use built-in config file.
        creds = {k: v for k, v in creds_coll.items() if v is not None}
    else:
        # Else we allow override of built-in dictionary.
        creds = {k: v for k, v in kwargs.items() if v is not None}
    glob_logger.debug("Using keystone creds: {}".format(creds))
    return creds
def keystone_retrieve(version: str = 'v2', read_export: bool = False,
                      **kwargs: dict) -> dict:
    """
    Simple function to retrieve configuration information.  If no kwargs
    are passed in and read_export is True, the necessary information is
    retrieved from the environment (ie, as when you source
    keystonerc_admin); otherwise the built-in config file is used.

    :type read_export: bool
    :param version: sets the version of ReST protocol to implement.
        (ie. "/v2.0", "/v3")
    kwargs:
        auth_url location to contact the keystone server.
        username username to authenticate against keystone server.
        password password for username.
        project_name (version 3) or tenant_name (version 2) project
            credential for user.
        user_domain_name domain for username, only valid for version 3
            protocol.
        project_domain_name domain for specified project, only valid for
            version 3.
    :rtype : dict
    :return: A dictionary that can be used with keystone client.
    """
    coll = CFG.dump_config(IRIDIUM_CONF)
    creds_coll = CFG.lookup(config_dict=coll, search_key=version)
    if not kwargs and read_export:
        # Credentials exported into the shell environment
        glob_logger.info("Reading Environmental variables..")
        creds = {
            "username": os.environ.get("OS_USERNAME"),
            "password": os.environ.get("OS_PASSWORD"),
            "auth_url": os.environ.get("OS_AUTH_URL"),
            "tenant_name": os.environ.get("OS_TENANT_NAME")
        }
    # BUG FIX: this was a bare `if not kwargs:` which unconditionally
    # overwrote the environment-derived creds built just above.
    elif not kwargs:
        # Here we use built-in config file.
        creds = {k: v for k, v in creds_coll.items() if v is not None}
    else:
        # Else we allow override of built-in dictionary.
        creds = {k: v for k, v in kwargs.items() if v is not None}
    glob_logger.debug("Using keystone creds: {}".format(creds))
    return creds
def pinger(host, timeout=300):
    """
    Repeatedly pings a host until it answers or a deadline passes.

    :param host: IP address of machine to ping to
    :param timeout: timeout in seconds
    :return: None
    :raises Exception: if the host never answers within `timeout` seconds
    """
    ping = Command("ping -W 4 -c 4 {}".format(host), stderr=PIPE)
    deadline = time.time() + timeout
    while True:
        glob_logger.info("waiting for {} to come back up...".format(host))
        res = ping(showout=False, throws=False)
        # NOTE(review): res compares equal to 0 on success -- presumably
        # the Command result proxies the process return code; confirm.
        if res == 0:
            glob_logger.info("{} is back up".format(host))
            break
        time.sleep(10)
        if time.time() > deadline:
            err = "Machine did not come back after {} seconds".format(timeout)
            raise Exception(err)
def verify_nested_kvm(host):
    """
    Goes through loaded modules to see if kvm_intel or kvm_amd is loaded

    :param host: (str) IP Address of host
    :return: The CPU type (str) intel or amd
    :raises sce.ConfigException: if no kvm_(intel|amd) module is loaded
    """
    glob_logger.info("Checking is kvm and kvm-intel or kvm-amd is running...")
    lsmod = Command("lsmod", host=host)(showout=False)
    patt = re.compile(r"kvm_(intel|amd)")
    # FIX: removed unused local `out = lsmod.output`.
    # Skip the lsmod header line, then look for the vendor-specific module
    for line in lsmod.output.split("\n")[1:]:
        m = patt.search(line)
        if m:
            proc = m.groups()[0]
            break
    else:
        raise sce.ConfigException("kvm module is not loaded")
    return proc
def test_instance_list(nova_auth):
    """
    Lists the current nova instances and logs the result.

    :param nova_auth: authenticated nova client fixture
    """
    glob_logger.info("Nova instance list test.")
    instances = nova_auth.list_instances()
    print(instances)
    # BUG FIX: the debug call previously formatted the builtin `list`
    # type instead of the instances that were just retrieved.
    glob_logger.debug('List of instances: %s' % instances)
def test_list_chassis(ironic_cl):
    """
    Placeholder test for listing ironic chassis.

    :param ironic_cl: ironic client fixture (currently unused)
    """
    # TODO(review): stub -- logs an empty message and performs no listing
    # or assertions yet
    glob_logger.info("")
def test_instance_creation(nova_auth):
    """
    Boots a single test instance and asserts the boot call succeeded.

    :param nova_auth: authenticated nova client fixture
    """
    glob_logger.info("Nova instance creation test...")
    booted = nova_auth.boot_instance('iridium_test',
                                     '22b40f63-2b37-4855-8f32-07da65bd3c43',
                                     '1')
    assert booted
def test_create_chassis(ironic_cl):
    """
    Creates a chassis named "pytest_chassis" via the ironic client.

    :param ironic_cl: authenticated ironic client fixture
    """
    glob_logger.info("creating chassis")
    ironic_cl.create_chassis("pytest_chassis")
def set_nested_vm_support(bare_m, dom_info, fn=set_host_passthrough,
                          kvm=True, user="******", timeout=600):
    """
    Sets nested support for the masters and any domains.  This works by
    enabling KVM extensions on the baremetal host, and also setting the
    correct domain xml on the L1 hypervisors.

    :param bare_m: (str) ip address of the masters machine
    :param dom_info: a tuple of (ip, domain_name) for L1 guests
    :param fn: a function that takes the masters IP, a domain name, and
        user; may be None to skip cpu-mode editing
    :param kvm: (bool) check to see if /dev/kvm exists
    :param user: user to connect to libvirt as
    :param timeout: accepted for interface compatibility (unused here)
    :return: None
    """
    msg = "Verifying and setting nested VM support on {}..."
    glob_logger.info(msg.format(dom_info))

    # Make sure that the L1 guests are running
    ip, dom_name = dom_info
    dom_ = get_domain(bare_m, dom_name)
    glob_logger.info("Making sure {} is running".format(dom_name))
    state = dom_.state()
    if state[0] not in [1]:
        glob_logger.info("Powering up {}".format(dom_name))
        power_on(dom_)

    # Make sure kvm module is loaded
    cpu = test_and_set_kvm_module(bare_m)

    # Make sure /etc/modprobe.d/dist.conf is set
    glob_logger.info("Verifying if /etc/modprobe.d/dist.conf is set")
    test_and_set_distconf(bare_m, cpu)

    # We only need to edit the domain xml if it isn't already set for the
    # requested cpu mode, so check the current mode first.
    # BUG FIX: fn.__name__ was evaluated unconditionally, so calling with
    # fn=None raised AttributeError before the `fn is None` checks below
    # ever ran.  (Also dropped a dead ET.fromstring/cpu-element lookup
    # whose result was never used.)
    cpu_mode = fn is not None and fn.__name__ in ("set_host_passthrough",
                                                  "set_host_model")
    mode_type = fn.__name__ if fn else None

    # Check to see if we already have the mode setup
    info = untangle.parse(dom_.XMLDesc())
    passthrough_set = info.domain.cpu["mode"] == "host-passthrough"
    host_model_set = info.domain.cpu["mode"] == "custom"

    # Make sure that the L1 hypervisor is shutdown before taking down bare_m
    already_set = False
    if cpu_mode:
        if (mode_type == "set_host_passthrough" and passthrough_set) or \
                (mode_type == "set_host_model" and host_model_set):
            already_set = True
        else:
            glob_logger.info("Taking down L1 hypervisor {}".format(ip))
            state = dom_.state()
            if state[0] in [1]:
                shutdown(dom_)

    # Check if the masters has nested support after reboot
    _ = test_and_set_nested(bare_m)

    # If we aren't doing host passthrough, we're done
    if fn is None or already_set:
        turn_on(bare_m, dom_name, ip)
        pinger(ip)
        return

    # Otherwise, call our passthrough function if we dont already have
    # passthrough mode enabled
    if cpu_mode:
        glob_logger.info("calling {}".format(fn.__name__))
        fn(bare_m, dom_name, user=user)
    turn_on(bare_m, dom_name, ip)
    pinger(ip)
    time.sleep(10)  # TODO: uggh, need to know when SSH is up

    # Make sure we have the /dev/kvm special file
    if kvm:
        check_kvm_file(ip)