def pip_cmd(self, pkgnames, cmd="install", pip_args=None):
    """
    Runs a pip sub-command (install by default) for the given package(s).

    :param pkgnames: (str|iterable) a single package name or an iterable
        of package names handed to pip
    :param cmd: (str) the pip sub-command to run, e.g. "install"
    :param pip_args: (list|None) extra arguments for pip; a fresh list is
        created when None so no mutable default is shared between calls
    :raises ImportError: if pip cannot be imported
    """
    if pip_args is None:
        pip_args = []
    pip_args.append(cmd)
    if isinstance(pkgnames, str):
        pip_args.append(pkgnames)
    else:
        # concatenate the package names onto the arg list
        pip_args.extend(pkgnames)
    msg = "Running pip " + " ".join(pip_args)
    glob_logger.info(msg)
    # FIX: the original imported pip (and an unused setuptools
    # find_packages) twice, and the first failed import was only logged
    # and then execution continued anyway.  Import once, and fail loudly.
    try:
        import pip
    except ImportError as ie:
        self.logger.error("Unable to import pip")
        raise ie
    pip.main(initial_args=pip_args)
def test_and_set_nested(host, timeout=600):
    """
    Verifies that the host has nested virtualization set for kvm module

    If the kvm_intel nested parameter does not read "Y", the host is
    rebooted and the parameter is re-checked once it responds again.

    :param host: (str) IP address of the machine to check
    :param timeout: (int) seconds allowed for the reboot/ping cycle
    :return: ProcessResult of the final parameter check
    :raises sce.ConfigException: if nested support is still not enabled
        after the reboot
    """
    cmd = "cat /sys/module/kvm_intel/parameters/nested"
    res = Command(cmd, host=host)(showout=False)
    if res.output.strip() != "Y":
        # Reboot the masters machine
        glob_logger.info("rebooting {} to set nested param".format(host))
        rebooter(host, timeout=timeout)
        time.sleep(45)  # Fudge factor here...
        pinger(host, timeout=timeout)
        # After reboot, make sure nested support is there
        path = "/sys/module/kvm_intel/parameters/nested"
        cmd = "cat {}".format(path)
        try:
            res = Command(cmd, host=host)(showout=False)
            if res.output.strip() != "Y":
                glob_logger.error("{}={}".format(path, res.output.strip()))
                raise sce.ConfigException("Nested support still not enabled")
        except CommandException:
            # the cat itself failed; treat that the same as "not enabled"
            raise sce.ConfigException("Nested support still not enabled")
    return res
def check_env(field, key):
    """Fall back to environment variable *key* when opts.*field* is unset."""
    current = getattr(opts, field)
    glob_logger.info("Value of opts.{} is {}".format(field, current))
    # nothing to do when the option was given or the env var is absent
    if current is not None or key not in os.environ:
        return
    msg = "Setting opts.{} to {}".format(field, os.environ[key])
    glob_logger.info(msg)
    setattr(opts, field, os.environ[key])
def _setup(self):
    """
    Use this instead of self.setUp() because we don't want to run this
    before the test method itself

    Boots a small scout instance so we can discover which compute node the
    scheduler picks, then records that node's details in self.data.
    :return: None
    """
    if not hasattr(self, "sanity"):
        self.sanity = NovaSanity()
    self._base_setup(self.sanity)
    # We cant tell nova which hosts to build on. So we create a tiny guest
    # and add it to an affinity group. Then we can determine which hosts
    # it is on, get its numa characteristics and start adding other guests
    # to that hosts
    # FIXME: remove the hard-coded names
    glob_logger.info("Booting up initial instance")
    test_flv = self.sanity.get_flavor("m1.tiny")
    test_img = self.sanity.get_image_name("cirros")
    test_inst = self.sanity.boot_instances([test_img], [test_flv])
    self.data.update({"test_instances": test_inst})
    # Figure out what the master is
    instances = self.sanity.discover()
    test_vm = instances[0]
    hosts = self.config["hosts"]
    computes = hosts["computes"]
    host_for_test = test_vm.host.host
    # record the compute entry matching where the scout instance landed
    for compute in computes:
        if compute["host"] == host_for_test:
            self.data.update({"master": compute["parent"]})
            self.data.update({"compute_name": compute["name"]})
            self.data.update({"compute_host": compute["host"]})
            break
def freader(fobj, monitor=None, save=None, showout=True, proc=None):
    """
    Small function which can be thrown into a thread to read a long
    running subprocess

    :param fobj: a file like object that will be read from
    :param monitor: (Queue|None) control queue; a "stop" item ends the loop
    :param save: (Queue|None) by default dont save, otherwise put every
        line read (including empty ones) onto this queue
    :param showout: (bool) when True, log each non-empty line read
    :param proc: (Popen|None) when given, stop once the process terminates
    """
    while not fobj.closed:
        # stop as soon as the watched process has exited
        if proc is not None and proc.poll() is not None:
            break
        # FIX: the docstring documented a nonexistent ``interval`` param and
        # the code relied on None.get_nowait() raising AttributeError; check
        # for a missing monitor explicitly instead.
        if monitor is not None:
            try:
                if monitor.get_nowait() == "stop":
                    break
            except queue.Empty:
                pass
        line = fobj.readline()  # blocks when nothing in fobj buffer
        if line and showout:
            LOGGER.info(line.strip())
        if save is not None:
            save.put(line)
def ensure_nested_support(computes):
    """Make sure every L1 compute hypervisor uses host-model CPU mode."""
    glob_logger.info(
        "Ensuring that L1 hypervisors are set match host model...")
    # Will make sure that our L1 compute nodes have host-model match
    _setup_nested_support(compute_factory(computes))
def config_factory(opts):
    """
    Builds a Config object from the YAML file named by opts.config.

    :param opts: options object whose ``config`` attribute is the path to
        a YAML configuration file
    :return: a Config instance built from opts plus the parsed YAML keys
    """
    glob_logger.info("Creating Config object")
    with open(opts.config) as cfg:
        txt = cfg.read()
    # FIX: safe_load instead of load — load without a Loader is deprecated
    # in PyYAML >= 5.1 and can construct arbitrary Python objects from the
    # config file.
    config = yaml.safe_load(txt)
    return Config(opts, **config)
def _poll_for_status(instance, status, poll_interval=2, timeout=300, log=False):
    """
    Polls for the status of a nova instance

    :param instance: The nova instance object to poll
    :param status: What status to check for.  If "deleted", polls until the
                   instance has been deleted
    :param poll_interval: (int) seconds to sleep between polls
    :param timeout: (int|None) overall deadline in seconds; None polls forever
    :param log: (bool) when True, log each intermediate status check
    :return: (bool) True if the desired status was achieved
    """
    start_time = time.time()

    def timer():
        # BUG FIX: only compute the deadline when timeout is a number.  The
        # original evaluated start_time + timeout BEFORE the None check, so
        # timeout=None raised TypeError instead of polling forever.
        if timeout is None:
            return True
        return (start_time + timeout) > time.time()

    achieved = False
    while timer():
        try:
            instance.get()
        except NotFound as nf:
            # the instance is gone — success iff that is what we wanted
            if status == "deleted":
                achieved = True
                break
            else:
                raise nf
        except AttributeError as ae:
            if status == "deleted":
                achieved = True
                break
            else:
                raise ae
        else:
            if instance.status == "ERROR":
                if status == "ERROR":
                    achieved = True
                else:
                    glob_logger.error("Failed to boot instance")
                break
            if instance.status != status:
                if log:
                    msg = "Checking for {} on {}: status is {}"
                    msg = msg.format(status, instance.name, instance.status)
                    glob_logger.info(msg)
                time.sleep(poll_interval)
            else:
                achieved = True
                break
    return achieved
def result(self):
    """Lazily fetch and cache the worker result from res_q."""
    if self._result is not None:
        return self._result
    glob_logger.info("Getting result from res_q")
    # an empty queue means the worker produced nothing → report failure
    self._result = ("Failed", "") if self.res_q.empty() else self.res_q.get()
    return self._result
def inner(*args):
    """Stop the handler's reader when the magic sentinel line arrives."""
    self, line = args[0], args[1]
    if line != "==magic-fail==":
        return fn(*args)
    glob_logger.info("Handler is terminating")
    self.rdr.terminate()
    return False
def __del__(self):
    """Best-effort cleanup: stop worker processes and close the log file."""
    for worker in (self._reader_proc, self._monitor_proc):
        if worker:
            worker.terminate()
    if self._log:
        glob_logger.info("closing log file")
        self._log.close()
def create_pin_flavors(self, name, ram=512, vcpus=2, disk=10, extra=None):
    """Create a flavor and decorate it with vcpu-pinning extra specs."""
    # 3. Create the pin flavors; avoid a shared mutable default for extra
    specs = {} if extra is None else extra
    flavor = self.numa.create_flavor(name, ram=ram, vcpus=vcpus,
                                     disksize=disk, specs=specs)
    flavor = self.numa.create_vcpu_pin_flavor(flv=flavor)
    glob_logger.info(str(flavor.get_keys()))
    return flavor
def __call__(self, line):
    """Return False (stop reading) once the config-drive line for our
    instance id has been seen self.counts times."""
    needle = "[instance: {}] Creating config drive".format(self.id)
    if needle not in line:
        return True
    self.found += 1
    if self.found >= self.counts:
        self.res_q.put(("Success", line))
        glob_logger.info("Found a match: " + line)
        self.rdr.terminate()
        return False
    return True
def __call__(self, line):
    """Record every line matched by self.fn; stop the reader after
    self.counts matches (when counts is set)."""
    if not self.fn(line):
        return True
    self.found += 1
    self.res_q.put(("Success", line))
    glob_logger.info("Found a match: " + line)
    if self.counts is not None and self.found >= self.counts:
        self.rdr.terminate()
        return False
    return True
def check_kvm_file(host):
    """
    Checks of the /dev/kvm special file exists on host

    :param host: (str) IP address of machine
    :return: ProcessResult object (or throws ConfigException)
    """
    glob_logger.info("Checking /dev/kvm")
    result = Command("file /dev/kvm", host=host)()
    # `file` reports "cannot open" when the device node is missing
    if "cannot open" in result.output:
        raise sce.ConfigException("no /dev/kvm on {}".format(host))
    return result
def set_host_model(hyper_ip, dom_name, user="******"):
    """
    Can be used as fn arg to set_nested_vm_support

    :param hyper_ip: the IP address of hypervisor machine
    :param dom_name: the libvirt domain name
    :param user: user to connect to libvirt as
    :return: ProcessResult of executing virt-xml command
    """
    glob_logger.info("Setting host_model mode for {}".format(dom_name))
    template = ("virt-xml --connect=qemu+ssh://{}@{}/system {} --edit --cpu "
                "host-model-only,+vmx")
    return Command(template.format(user, hyper_ip, dom_name))()
def check_reader(self):
    """
    Makes sure that the Handler's reader process/thread is still going.

    No point in reading if the process/thread isn't emitting any more data.
    Works for both threads (is_alive) and processes (poll).
    """
    still_going = True
    rdr = self.rdr
    is_alive = getattr(rdr, "is_alive", None)
    if is_alive is not None and not is_alive():
        glob_logger.info("Thread has died")
        still_going = False
    poll = getattr(rdr, "poll", None)
    if poll is not None and poll() is not None:
        glob_logger.info("Process has terminated")
        still_going = False
    return still_going
def set_host_passthrough(hyper_ip, dom_name, user="******"):
    """
    Sets a domain's <cpu> element to use mode host-passthrough

    :param hyper_ip: (str) IP address of host with hypervisor
    :param dom_name: (str) the libvirt domain name
    :param user: (str) user to connect to libvirt hypervisor
    :return: ProcessTresult
    """
    # FIXME: How do we do this just using libvirt? This adds a dependency
    # on virt-xml
    # Edit the domain's xml to use host-passthrough mode
    glob_logger.info("Setting host-passthrough mode for {}".format(dom_name))
    template = ("virt-xml --connect=qemu+ssh://{}@{}/system {} --edit --cpu "
                "host-passthrough,clearxml=yes")
    return Command(template.format(user, hyper_ip, dom_name))()
def _upgrade(self, commands):
    """
    Runs the given shell commands on every host, then verifies that the
    service came back up afterwards.

    :param commands: iterable of (str) command lines to run on each host
    :raises Exception: if self.get_service_state() does not report active
    """
    # Make sure to stop the service first
    for host in self.hosts:
        cmds = [Command(x, host=host) for x in commands]
        for cmd in cmds:
            glob_logger.info("Calling: {}".format(cmd.cmd))
            try:
                res = cmd()
                if res != 0:
                    glob_logger.error("Unable to run {}".format(cmd.cmd))
            except Exception:
                # FIX: narrowed from a bare except:, which would also have
                # swallowed KeyboardInterrupt and SystemExit
                cmdstr = cmd.cmd
                glob_logger.error("Could not execute {}".format(cmdstr))
    state = self.get_service_state()
    if "active" not in state:
        raise Exception("Service {} did not come up".format(self.name))
def turn_on(hv_ip, domain_name, ip_addr, wait=5):
    """
    Powers on a VM given the hypervisor IP address and VM's domain name

    :param hv_ip: (str) IP address of the hypervisor host
    :param domain_name: (str) libvirt domain name of the VM to start
    :param ip_addr: (str) IP address of the VM, pinged to wait for boot
    :param wait: (int) extra seconds to sleep after the VM answers ping
    :return: the libvirt domain object
    """
    dom_ = get_domain(hv_ip, domain_name)
    # state()[0] == 1 is the "running" state; only power on when not running
    if dom_.state()[0] not in [1]:
        # Start the L1 guest hypervisor
        glob_logger.info("Bringing back up L1 hypervisor {}".format(ip_addr))
        power_on(dom_)
        time.sleep(1)
        pinger(ip_addr)
        time.sleep(wait)  # Give a few seconds for services to come up
    return dom_
def finish(self, opts):
    """
    In the skeleton.yml file, we have placeholders like <host>.  We will
    fill these in according to the opts object

    :param opts: parsed command-line options whose fields (masters,
        controllers, computes, smog, nova_*) override the skeleton values
    """
    def fn(field):
        # fills self.<field> from a "name:ip[,name:ip...]" option string
        opt = getattr(opts, field)
        if opt is None:
            return
        info = opt.split(",")
        for i, host_pair in enumerate(info):
            h_name, h_ip = host_pair.split(":")
            obj = getattr(self, field)
            if isinstance(obj, list):
                obj[i]["name"] = h_name
                obj[i]["host"] = h_ip
            else:
                obj["name"] = h_name
                obj["host"] = h_ip

    for fld in self.fields:
        glob_logger.info("Setting {}".format(fld))
        if fld == "masters":
            if opts.masters is not None:
                # keys become master1, master2, ... in the given order
                masters = opts.masters.split(",")
                mstr = "master{}"
                master_d = {mstr.format(i): master
                            for i, master in enumerate(masters, 1)}
                self.masters = master_d
        elif fld == "hosts":
            print("Getting controllers info...")
            fn("controllers")
            print("Getting computes info ...")
            fn("computes")
        elif fld == "smog":
            if opts.smog is not None:
                self.smog = yaml.load(opts.smog)
        elif fld == "nova":
            self.nova["nested_vm"] = opts.nova_nested
            self.nova["host_passthrough"] = opts.nova_passthrough
            self.nova["virt_type"] = opts.nova_virt_type
            # NOTE(review): nova_filters handling placed in the "nova"
            # branch per the original statement order — confirm intent
            if opts.nova_filters is not None:
                self.nova_filters = opts.nova_filters.split(",")
    print(self)
def pinger(host, timeout=300):
    """
    :param host: IP address of machine to ping to
    :param timeout: timeout in seconds
    :return: None
    """
    ping = Command("ping -W 4 -c 4 {}".format(host), stderr=PIPE)
    deadline = time.time() + timeout
    while True:
        glob_logger.info("waiting for {} to come back up...".format(host))
        # a zero exit status from ping means the host answered
        if ping(showout=False, throws=False) == 0:
            glob_logger.info("{} is back up".format(host))
            return
        time.sleep(10)
        if time.time() > deadline:
            err = "Machine did not come back after {} seconds".format(timeout)
            raise Exception(err)
def close(self, que=None):
    """
    Shuts the watcher down: pushes the magic sentinel line so the reader
    stops, closes the queue, terminates the watched process (closing its
    piped stdout), and finally closes the log file if one is open.

    :param que: queue to signal; defaults to self.queue when None
    """
    line = "==magic-fail=="
    if que is None:
        que = self.queue
    try:
        glob_logger.info("putting ==magic-fail== in queue")
        que.put(line)
    except AssertionError:
        # a multiprocessing.Queue raises AssertionError once closed
        glob_logger.debug("queue got AssertionError")
        pass
    except Exception as ex:
        glob_logger.debug("queue got {}".format(ex))
    glob_logger.info("closing queue")
    que.close()
    if self._process:
        glob_logger.debug("terminating watcher process")
        try:
            self._process.terminate()
        except ProcessLookupError:
            # the process already exited on its own
            pass
        # Apparently, when the subprocess ends, the PIPE'd stdout
        # doesn't close. So we need to shut it down so that the
        # reader thread closes
        if not self._process.stdout.closed:
            self._process.stdout.close()
    if self._log:
        glob_logger.info("closing log file")
        self._log.close()
def verify_nested_kvm(host):
    """
    Goes through loaded modules to see if kvm_intel or kvm_amd is loaded

    :param host: (str) IP Address of host
    :return: The CPU type (str) intel or amd
    :raises sce.ConfigException: if no kvm_(intel|amd) module is loaded
    """
    glob_logger.info("Checking is kvm and kvm-intel or kvm-amd is running...")
    lsmod = Command("lsmod", host=host)(showout=False)
    patt = re.compile(r"kvm_(intel|amd)")
    # FIX: removed the unused local `out = lsmod.output`.
    # Skip the lsmod header line; the for/else raises when nothing matched.
    for line in lsmod.output.split("\n")[1:]:
        m = patt.search(line)
        if m:
            proc = m.groups()[0]
            break
    else:
        raise sce.ConfigException("kvm module is not loaded")
    return proc
def monitor(handler, que_r):
    """
    This function will consume items from the queue.  The handler callable
    will be called on each item pulled from the queue and do something
    accordingly.  To break out of the loop, the handler function will raise
    a special exception of MonitoredException

    The handler is a callable that returns True if the monitor should
    continue or False if the monitor should stop.  The callable takes a
    single arg, which is a line that will be examined to determine whether
    it returns True or False

    :param handler: a predicate that takes a single string as an argument
    :param que_r: a multiprocessing.Queue that lines are consumed from
    :return: None
    """
    keep_going = True
    while keep_going:
        try:
            empty = que_r.empty()
        except OSError:
            # the queue was closed from the other side — nothing more to read
            glob_logger.info("queue is closed")
            break
        if not empty:
            try:
                line = que_r.get()
                # the handler decides whether we keep consuming
                keep_going = handler(line)
            except OSError:
                glob_logger.debug("queue is closed")
                break
            except MonitoredException:
                # handler signalled an intentional stop
                break
            except Exception as ex:
                glob_logger.debug("queue error type: {}".format(ex))
                break
    glob_logger.info("monitor loop is finished")
def live_migrate(self, info):
    """
    Performs a live migration if needed (for example, when the instance is
    on the same node as the one and only controllers)

    :param info: object carrying .instance (smog.base.Instance),
        .controller (str IP of the controller) and .orig_hostname (str);
        updated in place with the chosen .hypervisor and refreshed state
    :return: the (updated) info object
    :raises Exception: if no other compute node exists or the migration
        does not complete within 30 seconds
    """
    instance = info.instance
    controller = info.controller
    orig_host = info.orig_hostname
    # if controllers is the same as our compute name, we need to migrate it
    # to another hosts
    glob_logger.info("Need to migrate instance to another compute node")
    msg = "instance {} is on controllers hosts: {}"
    glob_logger.info(msg.format(instance.instance.id, controller))
    # pick any hypervisor that is NOT on the controller node
    filt = lambda x: x.host_ip != controller
    hypers = smog.nova.list_hypervisors(self.sanity.nova, fn=filt)
    if not hypers:
        raise Exception("No other compute nodes found")
    info.hypervisor = hypers[0]
    final = info.hypervisor.hypervisor_hostname
    # migrate to final
    msg = "Migrating instance({}) on hosts {} to hosts {}"
    glob_logger.info(msg.format(instance.instance.id, orig_host, final))
    vm = instance.instance
    vm.live_migrate(host=final)  # FIXME: what about block migration
    # poll (up to 30s) until nova reports the instance on a different host
    start_time = time.time()
    end_time = start_time + 30
    while True:
        vm.get()
        host_attr = getattr(vm, "OS-EXT-SRV-ATTR:host")
        if host_attr and host_attr != orig_host:
            break
        if time.time() > end_time:
            raise Exception("live migration failed")
        time.sleep(1)
    # FIXME: Ugghhh all this state management. I feel dirty
    self.update_info(info, self.sanity)
    return info
short = short[:-1] # remove the .,- or _ return short, hostname # Get the domain from the share_storage config file domain_name = self.share_storage_cfg["nfs"]["nfs_idmapd"]["Domain"] # Get the /etc/hosts file from all the remote machines entries = {} for comp in self.computes: compute_entry = "{0} {1}".format(*get_host_names(comp, domain_name)) entries.update({comp: compute_entry}) for comp in self.computes: configure_hosts(comp, entries, "/etc/hosts", not_found="append", delim=" ") return True if __name__ == "__main__": cnfs = ConfigureNFS() if cnfs.opts.functions: for fn_name in cnfs.opts.functions.split(","): glob_logger.info("Calling {}".format(fn_name)) fn = getattr(cnfs, fn_name) fn() else: cnfs.setup()
def freader(watched, que, seek_end=True, log=sys.stdout, fail=magic):
    """
    Opens up the watched object for reading.  This object must have an
    open, close, and readline method.  This function will be run in a
    separate process

    :param watched: (str) Path to a file that will be opened read-only,
        or an already-open file-like object
    :param que: (multiprocessing.Queue) Queue that will be used to
        communicate between freader and monitor
    :param seek_end: If true, seek to the end of the file before reading
    :param log: A file object or something with a write method
    :param fail: (str) sentinel line; reading it stops the reader
    :return: None
    """
    def _read(fobj):
        if seek_end:
            try:
                fobj.seek(0, 2)
            except Exception:
                # FIX: narrowed from a bare except; a non-seekable object
                # (pipe/socket) is simply read from its current position
                pass
        while True:
            if fobj.closed:
                try:
                    line = fobj.read()  # get the last bit from the file
                except ValueError:
                    break
            else:
                try:
                    line = fobj.readline()
                except ValueError:
                    break
            if line:
                if isinstance(line, bytes):
                    line = line.decode()
                # Check to see if we need to break the reader thread
                if line == fail:
                    break
                if log:
                    try:
                        log.write(line)
                    except ValueError:
                        pass
                try:
                    que.put(line)
                except BrokenPipeError:
                    break
                except Exception as ex:
                    print(ex)
                    break
        if fobj.closed:
            # Try to open up again, in case we have a rotated log file
            sys.stdout.write("Reopening {}".format(watched))
            # FIX: propagate fail= to the recursive call; the original
            # dropped it, so a custom sentinel stopped working after a
            # log rotation.  (hmmm, recursion)
            freader(watched, que, seek_end=seek_end, log=log, fail=fail)

    if isinstance(watched, (str, bytes)):
        with open(watched, "r") as tailed:
            _read(tailed)
    else:
        _read(watched)
    glob_logger.info("reader loop is finished")
def ensure_nested_support(computes):
    """Verify that the L1 compute hypervisors are set for host-model."""
    glob_logger.info(
        "Ensuring that L1 hypervisors are set match host model...")
    nodes = compute_factory(computes)
    # make sure that our L1 compute nodes have host-model match
    _setup_nested_support(nodes)
# Abort with a clear message for the first missing credential field
for m in missing:
    glob_logger.error("Must supply --{} or have value in environment".format(m))
    raise ArgumentError("Argument {} not supplied for credentials".format(m))

numa = NUMA(**creds)  # Create a NUMA object
numa.clean()  # make sure we have a clean system

# Create a new flavor that will have the extra specs we need
numa_flavor = numa.create_flavor("numa_flavor", ram=ram, vcpus=vcpus,
                                 disksize=disk, specs=None)

# Modify the flavor with the appropriate extra_specs (one NUMA node here)
numa_flavor = numa.create_numa_topo_extra_specs(flv=numa_flavor, numa_nodes=1)

# Now we have a flavor with a NUMA topology defined. You can display the
# extra_specs
extra_specs = numa_flavor.get_keys()
glob_logger.info(str(extra_specs))

# Now that we have a flavor with a simple numa topology defined, we can
# boot an instance.  Note that the flavor that was defined only specified
# 1 NUMA node and a memory policy of preferred.  There are many additional
# permutations that can be done, such as having asymmetrical cpus to NUMA
# nodes, asymmetrical memory to NUMA nodes, or combining NUMA topology
# with vcpu pinning or large page memory support
image = numa.get_image_name("cirros")
instance = numa.boot_instance(img=image, flv=numa_flavor, name="numa_instance")

# Poll to see when the instance is done booting up
active = smog.nova.poll_status(instance, "ACTIVE", timeout=600)
if not active:
    print("Failed to boot instance")

# Now that the instance is actually up, check to see that it actually has
# the NUMA topology we asked for
numa = NUMA(**creds) # Create a NUMA object numa.clean() # make sure we have a clean system # Create a new flavor that will have the extra specs we need numa_flavor = numa.create_flavor("numa_flavor", ram=ram, vcpus=vcpus, disksize=disk, specs=None) # Modify the flavor with the appropriate extra_specs numa_flavor = numa.create_numa_topo_extra_specs(flv=numa_flavor, numa_nodes=1) # Now we have a flavor with 2 NUMA nodes defined. You can display the extra_specs extra_specs = numa_flavor.get_keys() glob_logger.info(str(extra_specs)) # Now that we have a flavor with a simple numa topology defined, we can boot an instance. # Note that the flavor that was defined only specified 1 NUMA nodes and a memory policy of # preferred. There are many additional permutations that can be done, such as having asymmetrical # cpus to to NUMA nodes, asymmetrical memory to NUMA nodes, or combining NUMA topology with # vcpu pinning or large page memory support image = numa.get_image_name("cirros") instance = numa.boot_instance(img=image, flv=numa_flavor, name="numa_instance") # Poll to see when the instance is done booting up active = smog.nova.poll_status(instance, "ACTIVE", timeout=600) if not active: print("Failed to boot instance") # Now that the instance is actually up, check to see that it actually has 2 NUMA nodes defined
# let's see if we already have a VFS setup and we have the right kernel params for host in args.compute: is_grub_set = pci.verify_cmdline(host) # Check if intel_iommu=on # I have noticed that when I install RHEL, the default interface script does # not have the ON_BOOT=yes. That becomes a problem when we restart the network # because otherwise we will need to manually specify dhclient def_iface # TODO: either edit the /etc/sysconfig/network-scripts/ifcfg-{def_iface} # to use ON_BOOT=yes, or add in change_modprobe, to call dhclient def_iface # at the end of the script def_iface = pci.get_default_iface(host) # If we dont have intel_iommu=on in /proc/cmdline, we need to set it # and reboot the system if not is_grub_set: glob_logger.info("Setting intel_iommu=on") res1 = pci.set_grub_cmdline(host) res2 = pci.grub2_mkconfig(host) # reboot the host virt.rebooter(host) virt.pinger(host, timeout=600) # This is really only needed for SRIOV or PCI Passthrough with an ethernet # device (PCI passthrough and SRIOV only works on VF's not PF's) is_vfs_here = pci.get_lspci_info(host) # Check if we have VF's if not is_vfs_here: # So there's a bug with using /etc/modprobe.d and setting max_Vfs # in a conf file. So we have to do this ugly hack. # scp the change_modprobe.py to remote machine and run it. # poll until system is back up
def set_nested_vm_support(bare_m, dom_info, fn=set_host_passthrough,
                          kvm=True, user="******", timeout=600):
    """
    Sets nested support for the masters and any domains.  This works by
    enabling KVM extensions on the baremetal host, and also setting the
    correct domain xml on the L1 hypervisors.

    :param bare_m: (str) ip address of the masters machine
    :param dom_info: a tuple of (ip, domain_name) for L1 guests
    :param fn: a function that takes the masters IP, a domain name, and user
    :param kvm: (bool) check to see if /dev/kvm exists
    :param user: (str) user to connect to libvirt as
    :param timeout: (int) NOTE(review): currently unused in the body
    :return: None
    """
    msg = "Verifying and setting nested VM support on {}..."
    glob_logger.info(msg.format(dom_info))
    # Make sure that the L1 guests are running
    ip, dom_name = dom_info
    dom_ = get_domain(bare_m, dom_name)
    glob_logger.info("Making sure {} is running".format(dom_name))
    state = dom_.state()
    # state 1 corresponds to the libvirt "running" state
    if state[0] not in [1]:
        glob_logger.info("Powering up {}".format(dom_name))
        power_on(dom_)
    # Make sure kvm module is loaded
    cpu = test_and_set_kvm_module(bare_m)
    # Make sure /etc/modprobe.d/dist.conf is set
    glob_logger.info("Verifying if /etc/modprobe.d/dist.conf is set")
    test_and_set_distconf(bare_m, cpu)
    # We only need to do this if our domain isn't already set for host
    # passthrough mode.  So let's check it first
    root = ET.fromstring(dom_.XMLDesc())
    # NOTE(review): this rebinds cpu (previously the module type string)
    # to the <cpu> element; the element itself is not used afterwards
    cpu = list(root.iter("cpu"))[0]
    # NOTE(review): fn.__name__ is read here BEFORE the fn-is-None guard
    # below, so passing fn=None would raise AttributeError — confirm that
    # callers never pass None
    cpu_mode = fn.__name__ == "set_host_passthrough" or \
               fn.__name__ == "set_host_model"
    mode_type = fn.__name__ if fn else None
    # Check to see if we already have the mode setup
    info = untangle.parse(dom_.XMLDesc())
    passthrough_set = False
    host_model_set = False
    if info.domain.cpu["mode"] == "host-passthrough":
        passthrough_set = True
    elif info.domain.cpu["mode"] == "custom":
        host_model_set = True
    # Make sure that the L1 hypervisor is shutdown before taking down bare_m
    already_set = False
    if fn is not None and cpu_mode:
        if (mode_type == "set_host_passthrough" and passthrough_set) or \
           (mode_type == "set_host_model" and host_model_set):
            already_set = True
        else:
            glob_logger.info("Taking down L1 hypervisor {}".format(ip))
            state = dom_.state()
            if state[0] in [1]:
                shutdown(dom_)
    # Check if the masters has nested support after reboot
    _ = test_and_set_nested(bare_m)
    # If we aren't doing host passthrough, we're done
    if fn is None or already_set:
        turn_on(bare_m, dom_name, ip)
        pinger(ip)
        return
    # Otherwise, call our passthrough function if we dont already have
    # passthrough mode enabled
    if cpu_mode:
        glob_logger.info("calling {}".format(fn.__name__))
        fn(bare_m, dom_name, user=user)
    turn_on(bare_m, dom_name, ip)
    pinger(ip)
    time.sleep(10)  # TODO: uggh, need to know when SSH is up
    # Make sure we have the /dev/kvm special file
    if kvm:
        check_kvm_file(ip)
def _base(self, host=None, instance_name=None):
    """
    Creates an instance on a multinode deployment, and determines which
    hosts the instance is on.  If the guest is running on a node that is
    also a controllers, it will live migrate the instance to another
    compute node.  It will then power off that hosts where the guest is
    running, and evacuate by specifying a possible hosts.  It verifies
    that the status_code is 200 from the evacuate command.

    :param host: if "valid", chose a known good hosts. otherwise a hostname
    :param instance_name: (str|None) when given, evacuate the instance with
        this name instead of info.instance
    :return: None (asserts on the evacuation results)
    """
    info = self.get_info(self.sanity, self.data)
    if info.controller == info.comp_host:
        self.live_migrate(info)
    master = info.master
    name = info.name
    hv = info.hypervisor
    if instance_name is None:
        instance = info.instance
    else:
        # BUG FIX: filter() returns an iterator in Python 3 and cannot be
        # indexed with [0]; take the first match with next() instead.
        instance = next(x for x in info.instances if x.name == instance_name)
    conn = libvirt.open("qemu+ssh://root@{}/system".format(master))
    domain = conn.lookupByName(name)
    glob_logger.info("Shutting down {} for evacuation".format(name))
    smog.virt.shutdown(domain)  # Make sure the hypervisor is down
    # FIXME: I believe there's a bug here. It doesn't seem like the state
    # or status of the hypervisor ever changes, even if it goes down
    hv.get()  # refresh the state
    # Evacuate without specifying the hosts
    guest = instance.instance
    time.sleep(60)  # FIXME: how to tell if using shared storage?
    # find a hypervisor co-located with the controller as the target
    filt = lambda x: x.host_ip == info.controller
    hvs = smog.nova.list_hypervisors(self.sanity.nova, fn=filt)
    if not hvs:
        raise Exception("Could not find a valid hosts")
    hostname = hvs[0].hypervisor_hostname
    if host == "valid":
        host = hostname
    glob_logger.info("Evacuating hosts to {}".format(hostname))
    result = guest.evacuate(host=host)
    glob_logger.info("status_code = {}".format(result[0].status_code))
    self.assertTrue(result[0].status_code == 200)
    # make sure it's on the right compute hosts
    discovered = self.sanity.discover()[0]
    dh = discovered.host.hostname
    msg = "discovered hostname {}, hostname {}".format(dh, hostname)
    glob_logger.info(msg)
    self.assertTrue(discovered.host.hostname == hostname)
    # power back on the compute hosts node
    smog.virt.power_on(domain)
    time.sleep(30)  # FIXME: how do we know the system is fully up?
    cmd = Command("setenforce 0", host=info.comp_host)
    res = cmd(remote=True)
    self.assertTrue(res == 0)