def boot_instance(nc, server_name, server_image, flavor, **kwargs):
    """
    Boots up a compute instance.

    **Sigh** So, in kilo you now have to specify a neutron network ID
    (pass it through **kwargs, e.g. a ``nics=`` argument).

    :param nc: a nova client instance
    :param server_name: string of name to give to compute instance
    :param server_image: the nova image object to boot up
    :param flavor: the flavor
    :param kwargs: extra keyword args forwarded to nc.servers.create()
    :return: instance if successful, None otherwise
    """
    # The explicit arguments always win over anything passed in **kwargs
    # (same outcome as the original's if/else around kwargs.update()).
    kwargs.update({"name": server_name,
                   "image": server_image,
                   "flavor": flavor})
    instance = nc.servers.create(**kwargs)
    # Sanity check: the newly created server must show up in the list.
    for server in nc.servers.list():
        if server.name == server_name:
            return instance
    glob_logger.error("Base image did not boot up")
    return None
def download_url(urlpath, output_dir=".", binary=False):
    """
    Downloads urlpath and saves it locally under its basename.

    :param urlpath: the URL to fetch
    :param output_dir: directory to write the file into; if it does not
        exist, the file is written to the current working directory instead
    :param binary: kept for backward compatibility; urlopen().read()
        returns bytes on Python 3, so the file is always written in
        binary mode (the original fell back to "wb" on TypeError anyway)
    :return: True if the file exists on disk afterwards, None on fetch error
    """
    try:
        response = urlopen(urlpath)
    except Exception as e:
        # Best-effort boundary: report and bail out (implicit None return,
        # as in the original).
        print(str(e))
        return
    try:
        payload = response.read()
    finally:
        response.close()  # don't leak the connection/file handle
    filename = os.path.basename(urlparse(urlpath).path)
    if output_dir != ".":
        if not os.path.exists(output_dir):
            glob_logger.error("{0} does not exist".format(output_dir))
            glob_logger.error("Writing file to {0}".format(os.getcwd()))
        else:
            filename = os.path.join(output_dir, filename)
    # payload is always bytes, so write binary unconditionally instead of
    # catching TypeError and re-opening the file in "wb".
    with open(filename, "wb") as downloaded:
        downloaded.write(payload)
    return os.path.exists(filename)
def check_args(self, creds):
    """
    Validates that every credential value was supplied.

    Logs *all* missing credential names before raising; the original
    raised inside the loop, so only the first missing key was ever
    reported.

    :param creds: dict mapping credential name -> value (None == missing)
    :raises ArgumentError: if any credential value is None (message names
        the first missing key, matching the original format)
    """
    missing = [k for k, v in creds.items() if v is None]
    for m in missing:
        glob_logger.error(
            "Must supply --{} or have value in environment".format(m))
    if missing:
        raise ArgumentError(
            "Argument {} not supplied for credentials".format(missing[0]))
def download_url(urlpath, output_dir=".", binary=False):
    """
    Downloads urlpath and saves it locally under its basename.

    :param urlpath: the URL to fetch
    :param output_dir: directory to write the file into; if it does not
        exist, the file is written to the current working directory instead
    :param binary: kept for backward compatibility; urlopen().read()
        returns bytes on Python 3, so the file is always written in
        binary mode (the original fell back to "wb" on TypeError anyway)
    :return: True if the file exists on disk afterwards, None on fetch error
    """
    try:
        response = urlopen(urlpath)
    except Exception as e:
        # Best-effort boundary: report and bail out (implicit None return,
        # as in the original).
        print(str(e))
        return
    try:
        payload = response.read()
    finally:
        response.close()  # don't leak the connection/file handle
    filename = os.path.basename(urlparse(urlpath).path)
    if output_dir != ".":
        if not os.path.exists(output_dir):
            glob_logger.error("{0} does not exist".format(output_dir))
            glob_logger.error("Writing file to {0}".format(os.getcwd()))
        else:
            filename = os.path.join(output_dir, filename)
    # payload is always bytes, so write binary unconditionally instead of
    # catching TypeError and re-opening the file in "wb".
    with open(filename, "wb") as downloaded:
        downloaded.write(payload)
    return os.path.exists(filename)
def pip_cmd(self, pkgnames, cmd="install", pip_args=None):
    """
    Runs a pip sub-command on one or more packages via pip's Python API.

    :param pkgnames: a single package name (str) or an iterable of names
    :param cmd: the pip sub-command to run (default "install")
    :param pip_args: optional list of extra arguments for pip; note that a
        caller-supplied list is mutated in place (original behavior kept)
    :raises ImportError: if pip itself cannot be imported
    """
    if pip_args is None:
        pip_args = []
    pip_args.append(cmd)
    if isinstance(pkgnames, str):
        pip_args.append(pkgnames)
    else:
        # Accept any iterable of package names.
        pip_args.extend(pkgnames)
    glob_logger.info("Running pip " + " ".join(pip_args))
    try:
        # Import once, lazily, so the module loads even without pip.
        # (The original imported pip twice and also pulled in an unused
        # setuptools.find_packages.)
        import pip
    except ImportError as ie:
        glob_logger.error("Unable to import pip")
        raise ie
    # NOTE(review): pip.main() was removed from pip's public API in pip 10
    # (moved to pip._internal) -- this call only works on old pip releases;
    # confirm the target environment before relying on it.
    pip.main(initial_args=pip_args)
def test_and_set_nested(host, timeout=600):
    """
    Verifies that the host has nested virtualization set for kvm module.

    If the kvm_intel "nested" parameter does not read "Y", the host is
    rebooted and the parameter is checked again; a second failure raises.

    :param host: host to check (and reboot if needed)
    :param timeout: seconds allowed for the reboot/ping cycle
    :return: ProcessResult
    :raises sce.ConfigException: if nested support is still not enabled
    """
    nested_param = "/sys/module/kvm_intel/parameters/nested"
    read_nested = "cat {}".format(nested_param)
    result = Command(read_nested, host=host)(showout=False)
    if result.output.strip() == "Y":
        return result
    # Nested virt is off: reboot the machine so the parameter takes effect.
    glob_logger.info("rebooting {} to set nested param".format(host))
    rebooter(host, timeout=timeout)
    time.sleep(45)  # Fudge factor here...
    pinger(host, timeout=timeout)
    # After the reboot, nested support must now be reported as enabled.
    try:
        result = Command(read_nested, host=host)(showout=False)
        if result.output.strip() != "Y":
            glob_logger.error("{}={}".format(nested_param,
                                             result.output.strip()))
            raise sce.ConfigException("Nested support still not enabled")
    except CommandException:
        raise sce.ConfigException("Nested support still not enabled")
    return result
def _poll_for_status(instance, status, poll_interval=2, timeout=300, log=False): """ Polls for the status of a nova instance :param instance: The nova instance object to poll :param status: What status to check for. If "deleted", polls until the instance has been deleted :param poll_interval: :param timeout: :return: """ start_time = time.time() def timer(): endtime = start_time + timeout if timeout is None: return True else: timenow = time.time() check = endtime > timenow return check achieved = False while timer(): try: instance.get() except NotFound as nf: if status == "deleted": achieved = True break else: raise nf except AttributeError as ae: if status == "deleted": achieved = True break else: raise ae else: if instance.status == "ERROR": if status == "ERROR": achieved = True else: glob_logger.error("Failed to boot instance") break if instance.status != status: if log: msg = "Checking for {} on {}: status is {}" msg = msg.format(status, instance.name, instance.status) glob_logger.info(msg) time.sleep(poll_interval) else: achieved = True break return achieved
def copy_settings(self):
    """
    Pushes the new local_settings file out to every dashboard host.

    Copies local_settings.rpmnew (in the cwd) over local_settings, then
    scp's the .rpmnew file to /etc/openstack-dashboard/ on each host in
    self.hosts, logging any copy failure.
    """
    dashboard_dir = "/etc/openstack-dashboard/"
    source = "local_settings.rpmnew"
    shutil.copy(source, "local_settings")
    for hostname in self.hosts:
        target = "root@{}:{}".format(hostname, dashboard_dir)
        if scp(source, target) != 0:
            glob_logger.error("Could not copy local_settings to remote")
def _upgrade(self, commands):
    """
    Runs the given upgrade commands on every host, then verifies the
    service came back up.

    :param commands: iterable of command strings to execute on each host
    :raises Exception: if the service is not active afterwards
    """
    # Make sure to stop the service first
    for host in self.hosts:
        cmds = [Command(x, host=host) for x in commands]
        for cmd in cmds:
            glob_logger.info("Calling: {}".format(cmd.cmd))
            try:
                res = cmd()
                if res != 0:
                    glob_logger.error("Unable to run {}".format(cmd.cmd))
            except Exception:
                # Narrowed from a bare except: that would also swallow
                # KeyboardInterrupt/SystemExit. Best-effort: log and keep
                # going with the remaining commands (original behavior).
                glob_logger.error("Could not execute {}".format(cmd.cmd))
    state = self.get_service_state()
    if "active" not in state:
        raise Exception("Service {} did not come up".format(self.name))
def backup_settings(self):
    """
    Backs up each dashboard host's local_settings file locally.

    Pulls /etc/openstack-dashboard/local_settings from every host, keeps
    a .rpmnew working copy and a .old backup, then sanity-checks that
    ALLOWED_HOSTS is set in the working copy (exits the process if not).
    """
    settings_p = "/etc/openstack-dashboard/"
    settings_f = "local_settings"
    settings_full = os.path.join(settings_p, settings_f)
    for host in self.hosts:
        src = "root@{}:{}".format(host, settings_full)
        scp(src, ".")
        # Bug fix: copy before move. The original moved local_settings to
        # .old first, so the subsequent copy's source no longer existed.
        shutil.copy(settings_f, settings_f + ".rpmnew")
        shutil.move(settings_f, settings_f + ".old")
    # Check that we have ALLOWED_HOSTS
    found = get_cfg("ALLOWED_HOSTS", "local_settings.rpmnew")
    # Bug fix: py3 filter() returns a lazy iterator, so the original's
    # found[0] raised TypeError; materialize to a list instead.
    found = [line for line in found if line.comment is None]
    if not (found and found[0].val):
        glob_logger.error("Need to correct ALLOWED_HOSTS")
        sys.exit(1)
def boot_pinned_instances(self, pin_flavor):
    """
    Boots a CPU-pinned instance with the given flavor and waits for it
    to reach ACTIVE, logging a failure message otherwise.

    :param pin_flavor: the flavor (with pinning extra_specs) to boot with
    """
    instance = self.numa.boot_instance(flv=pin_flavor, name="pin_test")
    if not smog.nova.poll_status(instance, "ACTIVE"):
        glob_logger.error("FAIL: The pinned instance could not be created")
# Parse our command line arguments
args = parser.parse_args()
ram, vcpus, disk = args.flavor.split(",")

# Optional key=value extra specs from the command line
if args.key is not None:
    pairs = [item.split("=") for item in args.key]
    specs = dict(pairs)

username = args.username
tenant_name = args.tenant_name
auth_url = args.auth_url
password = args.password
creds = dict(username=username,
             tenant_name=tenant_name,
             auth_url=auth_url,
             password=password)

# Every credential must be present, either from the CLI or the environment
missing = [k for k, v in creds.items() if v is None]
for m in missing:
    glob_logger.error(
        "Must supply --{} or have value in environment".format(m))
    raise ArgumentError(
        "Argument {} not supplied for credentials".format(m))

# Create a NUMA object and make sure we start from a clean system
numa = NUMA(**creds)
numa.clean()

# Create a new flavor that will have the extra specs we need
numa_flavor = numa.create_flavor("numa_flavor", ram=ram, vcpus=vcpus,
                                 disksize=disk, specs=None)

# Modify the flavor with the appropriate extra_specs
numa_flavor = numa.create_numa_topo_extra_specs(flv=numa_flavor,
                                                numa_nodes=1)

# Display the extra_specs now defined on the flavor
extra_specs = numa_flavor.get_keys()
glob_logger.info(str(extra_specs))
# Build the key=value extra specs supplied on the command line
pairs = [item.split("=") for item in args.key]
specs = dict(pairs)

username = args.username
tenant_name = args.tenant_name
auth_url = args.auth_url
password = args.password
creds = dict(username=username,
             tenant_name=tenant_name,
             auth_url=auth_url,
             password=password)

# Every credential must be present, either from the CLI or the environment
missing = [k for k, v in creds.items() if v is None]
for m in missing:
    glob_logger.error(
        "Must supply --{} or have value in environment".format(m))
    raise ArgumentError(
        "Argument {} not supplied for credentials".format(m))

# Create a NUMA object and make sure we start from a clean system
numa = NUMA(**creds)
numa.clean()

# Create a new flavor that will have the extra specs we need
numa_flavor = numa.create_flavor("numa_flavor", ram=ram, vcpus=vcpus,
                                 disksize=disk, specs=None)

# Modify the flavor with the appropriate extra_specs
numa_flavor = numa.create_numa_topo_extra_specs(flv=numa_flavor,
                                                numa_nodes=1)
watcher.close() # close all our threads (TODO: close automatically) cmpt = host alias_res = pci.set_pci_alias(cmpt, alias_name, v_id, p_id) white_res = pci.set_pci_whitelist(cmpt, v_id, p_id, "./nova.conf") filter_res = pci.set_pci_filter(cmpt, "./nova.conf") src = "./nova.conf" dest = "root@{}:/etc/nova/nova.conf".format(cmpt) res = scp(src, dest) # TODO: Add the NUMATopologyFilter to the default_scheduler_filter list nova_conf = get_nova_conf(host) lines = get_cfg("scheduler_default_filters", nova_conf) lines_ = list(filter(lambda l: l.comment is None, lines)) if not lines_: glob_logger.error("Unable to get") # restart nova pci.openstack_service(cmpt, "restart", "nova") # Setup example creds = read_rc_file(args.server, "/root/keystonerc_admin") # Now, we create a PCI flavor and attempt to boot numa = NUMA(**creds) flv = numa.create_flavor("pci_small", ram=512, vcpus=1) pci_pass_flv = numa.create_pci_flavor(alias_name, flv=flv) glob_logger.info(str(pci_pass_flv.get_keys())) guest = numa.boot_instance(flv=pci_pass_flv, name="pci-testing")
def check_args(self, creds):
    """
    Validates that every credential value was supplied.

    Logs *all* missing credential names before raising; the original
    raised inside the loop, so only the first missing key was ever
    reported.

    :param creds: dict mapping credential name -> value (None == missing)
    :raises ArgumentError: if any credential value is None (message names
        the first missing key, matching the original format)
    """
    missing = [k for k, v in creds.items() if v is None]
    for m in missing:
        glob_logger.error(
            "Must supply --{} or have value in environment".format(m))
    if missing:
        raise ArgumentError(
            "Argument {} not supplied for credentials".format(missing[0]))