Example #1
 def testinterface(self):
     '''
     Test the interface by bringing it down and up and verifying its state.
     '''
     if_down = "ifconfig %s down" % self.interface
     if_up = "ifconfig %s up" % self.interface
     # down the interface
     process.system(if_down, shell=True)
     # check the status of interface through ethtool
     ret = process.system_output(self.eth, shell=True)
     if 'yes' in ret:
         self.fail("interface test failed")
     # check the status of interface through ip link show
     ip_link = "ip link show %s | head -1" % self.interface
     ret = process.system_output(ip_link, shell=True)
     if 'UP' in ret:
         self.fail("interface test failed")
     # up the interface
     process.system(if_up, shell=True)
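     # give the link a few seconds to come back up before re-checking it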
     time.sleep(4)
     # check the status of interface through ethtool
     ret = process.system_output(self.eth, shell=True)
     if 'no' in ret:
         self.fail("interface test failed")
     # check the status of interface through ip link show
     ret = process.system_output(ip_link, shell=True)
     if 'DOWN' in ret:
         self.fail("interface test failed")
Example #2
    def set_chap_auth_target(self):
        """
        Set up CHAP authentication that applies to every initiator, by
        defining common login information for all endpoints in a TPG.
        """
        auth_cmd = "targetcli /iscsi/%s/tpg1/ " % self.target
        attr_cmd = ("set attribute %s %s %s" %
                    ("demo_mode_write_protect=0",
                     "generate_node_acls=1",
                     "cache_dynamic_acls=1"))
        process.system(auth_cmd + attr_cmd)
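        # The command above expands to, e.g.:
        #   targetcli /iscsi/<target_iqn>/tpg1/ set attribute \
        #       demo_mode_write_protect=0 generate_node_acls=1 cache_dynamic_acls=1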

        # Set userid
        userid_cmd = "%s set auth userid=%s" % (auth_cmd, self.chap_user)
        output = process.system_output(userid_cmd)
        if self.chap_user not in output:
            raise exceptions.TestFail("Failed to set user. (%s)" % output)

        # Set password
        passwd_cmd = "%s set auth password=%s" % (auth_cmd, self.chap_passwd)
        output = process.system_output(passwd_cmd)
        if self.chap_passwd not in output:
            raise exceptions.TestFail("Failed to set password. (%s)" % output)

        # Save configuration
        process.system("targetcli / saveconfig")
 def test(self):
     self.log.info(
         "===============Executing hwinfo tool test===============")
     options = self.params.get('list', default=['--all', '--cpu', '--disk'])
     for list_item in options:
         cmd = "hwinfo %s" % list_item
         self.run_cmd(cmd)
     disk_name = process.system_output(
         "df -h | egrep '(s|v)d[a-z][1-8]' | tail -1 | cut -d' ' -f1",
         shell=True).strip("123456789")
     self.run_cmd("hwinfo --disk --only %s" % disk_name)
     Unique_Id = process.system_output("hwinfo --disk --only %s | "
                                       "grep 'Unique' | head -1 | "
                                       "cut -d':' -f2" % disk_name, shell=True)
     self.run_cmd("hwinfo --disk --save-config %s" % Unique_Id)
     self.run_cmd("hwinfo --disk --show-config %s" % Unique_Id)
     self.run_cmd("hwinfo --verbose --map")
     self.run_cmd("hwinfo --all --log FILE")
     if (not os.path.exists('./FILE')) or (os.stat("FILE").st_size == 0):
         self.log.info("--log option failed")
         self.is_fail += 1
     self.run_cmd("hwinfo --dump-db 0")
     self.run_cmd("hwinfo --dump-db 1")
     self.run_cmd("hwinfo --version")
     self.run_cmd("hwinfo --help")
     self.run_cmd("hwinfo --debug 0 --disk --log=-")
     self.run_cmd("hwinfo --short --block")
     self.run_cmd("hwinfo --disk --save-config=all")
     if "failed" in process.system_output("hwinfo --disk --save-config=all | "
                                          "grep failed | tail -1", shell=True):
         self.is_fail += 1
         self.log.info("--save-config option failed")
     if self.is_fail >= 1:
         self.fail("%s command(s) failed in hwinfo tool verification" %
                   self.is_fail)
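The run_cmd helper used above (and in the vpdupdate and lscfg examples further down) is not part of this snippet; a minimal sketch of what such a helper typically looks like, assuming the is_fail counter is initialized in setUp:

 def run_cmd(self, cmd):
     # Hypothetical helper: run `cmd` and count failures instead of failing
     # immediately, so the test can report the total at the end.
     self.log.info("Executing: %s", cmd)
     if process.system(cmd, ignore_status=True, shell=True) != 0:
         self.is_fail += 1
         self.log.info("%s command failed", cmd)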
    def setUp(self):
        """
        Verifies if powerpc-utils is installed, and gets current SMT value.
        """
        if 'ppc' not in distro.detect().arch:
            self.cancel("Processor is not ppc64")
        if SoftwareManager().check_installed("powerpc-utils") is False:
            if SoftwareManager().install("powerpc-utils") is False:
                self.cancel("Unable to install powerpc-utils")
        smt_op = process.system_output("ppc64_cpu --smt")
        if "is not SMT capable" in smt_op:
            self.cancel("Machine is not SMT capable")
        if "Inconsistent state" in smt_op:
            self.cancel("Machine has mix of ST and SMT cores")

        self.curr_smt = process.system_output("ppc64_cpu --smt | awk -F'=' \
                '{print $NF}' | awk '{print $NF}'", shell=True)
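        # (`ppc64_cpu --smt` prints either "SMT=<n>" or "SMT is off"; the two
        # awk passes above pick out the value in either case.)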
        self.smt_subcores = 0
        if os.path.exists("/sys/devices/system/cpu/subcores_per_core"):
            self.smt_subcores = 1
        self.failures = 0
        self.failure_message = "\n"
        self.smt_values = {1: "off"}
        self.key = 0
        self.value = ""
        self.max_smt_value = 4
        if cpu.get_cpu_arch().lower() == 'power8':
            self.max_smt_value = 8
        if cpu.get_cpu_arch().lower() == 'power6':
            self.max_smt_value = 2
 def tearDown(self):
     """
     Sets back SMT to original value as was before the test.
     """
     process.system_output("%s=%s" % (self.smt_str,
                                      self.curr_smt), shell=True)
     process.system_output("dmesg")
Example #6
 def setUp(self):
     """
     Build 'nvme-cli' and setup the device.
     """
     self.device = self.params.get('device', default='/dev/nvme0')
     cmd = 'ls %s' % self.device
     if process.system(cmd, ignore_status=True) != 0:
         self.skip("%s does not exist" % self.device)
     locations = ["https://github.com/linux-nvme/nvme-cli/archive/"
                  "master.zip"]
     tarball = self.fetch_asset("nvme-cli.zip", locations=locations,
                                expire='15d')
     archive.extract(tarball, self.srcdir)
     os.chdir("%s/nvme-cli-master" % self.srcdir)
     process.system("./NVME-VERSION-GEN", ignore_status=True)
     built_ver = process.system_output(
         "cat NVME-VERSION-FILE").strip("\n").split()[-1]
     installed_ver = process.system_output(
         "nvme version").strip("\n").split()[-1]
     if built_ver != installed_ver:
         build.make(".")
         build.make(".", extra_args='install')
     self.id_ns = self.create_namespace()
     self.log.info(self.id_ns)
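     # The "in use" line of `nvme id-ns` carries "lbads:<n>" in field 5; the
     # in-use block size is 2**n bytes.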
     cmd = "nvme id-ns %s | grep 'in use' | awk '{print $5}' | \
         awk -F':' '{print $NF}'" % self.id_ns
     self.format_size = process.system_output(cmd, shell=True).strip('\n')
     self.format_size = pow(2, int(self.format_size))
     cmd = "nvme id-ns %s | grep 'in use' | awk '{print $2}'" % self.id_ns
     self.lba = process.system_output(cmd, shell=True).strip('\n')
Example #7
    def hugepage_assign(hp_num, target_ip='', node='', hp_size='', user='',
                        password=''):
        """
        Allocates hugepages for src and dst machines

        :param hp_num: number of hugepages
        :param target_ip: ip address of destination machine
        :param node: numa node to which HP have to be allocated
        :param hp_size: hugepage size
        :param user: remote machine's username
        :param password: remote machine's password
        """
        command = ""
        if node == '':
            if target_ip == '':
                utils_memory.set_num_huge_pages(int(hp_num))
            else:
                command = "echo %s > /proc/sys/vm/nr_hugepages" % (hp_num)
        else:
            command = "echo %s > /sys/devices/system/node/node" % (hp_num)
            command += "%s/hugepages/hugepages-%skB/" % (str(node), hp_size)
            command += "nr_hugepages"
        if command != "":
            if target_ip != "":
                server_session = remote.wait_for_login('ssh', target_ip, '22',
                                                       user, password,
                                                       r"[\#\$]\s*$")
                cmd_output = server_session.cmd_status_output(command)
                server_session.close()
                if (cmd_output[0] != 0):
                    raise error.TestNAError("HP not supported/configured")
            else:
                process.system_output(command, verbose=True, shell=True)
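An illustrative call; the IP address, node, page size and credentials below are placeholders, not values from this snippet:

# Allocate 512 x 2048 kB hugepages on NUMA node 0 of a remote host.
hugepage_assign("512", target_ip="192.168.122.10", node="0",
                hp_size="2048", user="root", password="secret")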
Example #8
    def check_ipt_rules(check_ipv4=True, check_ipv6=False):
        """
        Check iptables for network/interface
        """
        br_name = ast.literal_eval(net_bridge)["name"]
        net_forward = ast.literal_eval(params.get("net_forward", "{}"))
        net_ipv4 = params.get("net_ipv4")
        net_ipv6 = params.get("net_ipv6")
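        # Baseline rules expected for any libvirt bridge; NAT/route-specific
        # rules are appended later depending on the net_forward mode.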
        ipt_rules = ("FORWARD -i {0} -o {0} -j ACCEPT".format(br_name),
                     "FORWARD -o %s -j REJECT --reject-with icmp" % br_name,
                     "FORWARD -i %s -j REJECT --reject-with icmp" % br_name)
        net_dev_in = ""
        net_dev_out = ""
        if "dev" in net_forward:
            net_dev_in = " -i %s" % net_forward["dev"]
            net_dev_out = " -o %s" % net_forward["dev"]
        if check_ipv4:
            ipv4_rules = list(ipt_rules)
            ctr_rule = ""
            nat_rules = []
            if net_forward.get("mode") == "nat":
                nat_port = ast.literal_eval(params.get("nat_port"))
                p_start = nat_port["start"]
                p_end = nat_port["end"]
                ctr_rule = " -m .* RELATED,ESTABLISHED"
                nat_rules = [("POSTROUTING -s {0} ! -d {0} -p tcp -j MASQUERADE"
                              " --to-ports {1}-{2}".format(net_ipv4, p_start, p_end)),
                             ("POSTROUTING -s {0} ! -d {0} -p udp -j MASQUERADE"
                              " --to-ports {1}-{2}".format(net_ipv4, p_start, p_end)),
                             ("POSTROUTING -s {0} ! -d {0} -p udp"
                              " -j MASQUERADE".format(net_ipv4))]
            if nat_rules:
                ipv4_rules.extend(nat_rules)
            if (net_ipv4 and
                    net_forward.get("mode") in ["nat", "route"]):
                rules = [("FORWARD -d %s%s -o %s%s -j ACCEPT"
                          % (net_ipv4, net_dev_in, br_name, ctr_rule)),
                         ("FORWARD -s %s -i %s%s -j ACCEPT"
                          % (net_ipv4, br_name, net_dev_out))]
                ipv4_rules.extend(rules)

            output = process.system_output('iptables-save')
            logging.debug("iptables: %s", output)
            for ipt in ipv4_rules:
                if not re.findall(r"%s" % ipt, output, re.M):
                    test.fail("Can't find iptables rule:\n%s" % ipt)
        if check_ipv6:
            ipv6_rules = list(ipt_rules)
            if (net_ipv6 and
                    net_forward.get("mode") in ["nat", "route"]):
                rules = [("FORWARD -d %s%s -o %s -j ACCEPT"
                          % (net_ipv6, net_dev_in, br_name)),
                         ("FORWARD -s %s -i %s%s -j ACCEPT"
                          % (net_ipv6, br_name, net_dev_out))]
                ipv6_rules.extend(rules)
            output = process.system_output("ip6tables-save")
            logging.debug("iptables: %s", output)
            for ipt in ipv6_rules:
                if not output.count(ipt):
                    test.fail("Can't find ip6tables rule:\n%s" % ipt)
Example #9
 def setup_ip(self):
     '''
     set up the IP config
     '''
     if 'setup' in str(self.name):
         interface = self.host_interfaces[0]
     else:
         interface = self.bond_name
     msg = "ip addr show  | grep %s | grep -oE '[^ ]+$'"\
           % self.peer_first_ipinterface
     cmd = "ssh %s@%s %s" % (self.user, self.peer_first_ipinterface, msg)
     self.peer_first_interface = process.system_output(cmd,
                                                       shell=True).strip()
     if self.peer_first_interface == "":
         self.fail("test failed because the peer interface could not be retrieved")
     self.peer_ips = [self.peer_first_ipinterface]
     self.local_ip = netifaces.ifaddresses(interface)[2][0]['addr']
     self.peer_interfaces.insert(0, self.peer_first_interface)
     self.net_mask = []
     stf = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
     for val1, val2 in zip([interface], [self.local_ip]):
         mask = ""
         if val2:
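             # 0x891b is SIOCGIFNETMASK; the ioctl fills a packed ifreq and
             # the IPv4 netmask sits at bytes 20-24 of the returned buffer.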
             tmp = fcntl.ioctl(stf.fileno(), 0x891b, struct.pack('256s',
                                                                 val1))
             mask = socket.inet_ntoa(tmp[20:24]).strip('\n')
         self.net_mask.append(mask)
     cmd = "route -n | grep %s | grep -w UG | awk "\
           "'{ print $2 }'" % interface
     self.gateway = process.system_output(
         '%s' % cmd, shell=True)
 def test(self):
     """
     Validate the number of cpu idle states against device tree
     """
     for var in range(1, 10):
         cpu_num = random.choice(cpu.cpu_online_list())
         self.log.info("--------CPU: %s--------" % cpu_num)
         states = process.system_output("cpupower -c %s idle-info --silent"
                                        " | grep 'Number of idle states:' |"
                                        "awk '{print $5}'"
                                        % cpu_num, shell=True)
         cpu_idle_states = []
         for i in range(1, int(states)):
             val = process.system_output("cat /sys/devices/system/cpu/"
                                         "cpu%s/cpuidle/state%s/"
                                         "name" % (cpu_num, i))
             if 'power8' in cpu.get_cpu_arch():
                 val = self.set_idle_states(val)
             cpu_idle_states.append(val)
         devicetree_list = self.read_from_device_tree()
         if cpu_idle_states == devicetree_list:
             self.log.info("PASS : Validated the idle states")
         else:
             self.log.info("cpupower tool: %s, device tree: %s",
                           cpu_idle_states, devicetree_list)
             self.fail("Idle states mismatch between cpupower and device tree")
 def test1_vpdupdate(self):
     self.log.info("===============Executing vpdupdate tool test===="
                   "===========")
     self.run_cmd("vpdupdate")
     options = ['--help', '--version', '--archive', '--scsi']
     for list_item in options:
         cmd = "vpdupdate %s" % list_item
         self.run_cmd(cmd)
     path_db = process.system_output("find /var/lib/lsvpd/ -iname vpd.db | "
                                     "head -1", shell=True).strip()
     if path_db:
         copyfile_path = os.path.join(self.outputdir, 'vpd.db')
         shutil.copyfile(path_db, copyfile_path)
         self.run_cmd("vpdupdate --path=%s" % copyfile_path)
     if os.path.exists('/var/lib/lsvpd/run.vpdupdate'):
         path = '/var/lib/lsvpd/run.vpdupdate'
     elif os.path.exists('/run/run.vpdupdate'):
         path = '/run/run.vpdupdate'
     else:
         self.cancel("run.vpdupdate flag file not found")
     move_path = '/root/run.vpdupdate'
     shutil.move(path, move_path)
     self.log.info("Running vpdupdate after removing run.vpdupdate")
     self.run_cmd("vpdupdate")
     shutil.move(move_path, path)
     process.run("rm -f /var/lib/lsvpd/vpd.db; touch /var/lib/lsvpd/vpd.db",
                 shell=True)
     for command in ["lsvpd", "lscfg", "lsmcode"]:
         if not process.system_output("%s | grep run | grep vpdupdate" % command,
                                      shell=True, ignore_status=True).strip():
             self.fail("Error message is not displayed when vpd.db is corrupted.")
     self.run_cmd("vpdupdate")
     if self.is_fail >= 1:
         self.fail("%s command(s) failed in vpdupdate tool "
                   "verification" % self.is_fail)
Example #12
    def setUp(self):
        '''
        To check and install dependencies for the test
        '''
        smm = SoftwareManager()
        if not smm.check_installed("bridge-utils") and not smm.install("bridge-utils"):
            self.cancel("bridge-utils package is needed for the test")

        self.host_interface = self.params.get("interface",
                                              default=None)
        if not self.host_interface:
            self.cancel("User should specify host interface")

        interfaces = netifaces.interfaces()
        if self.host_interface not in interfaces:
            self.cancel("Interface is not available")

        self.peer_ip = self.params.get("peer_ip", default=None)
        if not self.peer_ip:
            self.cancel("User should specify peer IP")

        cmd = "ip addr show %s | sed -nr 's/.*inet ([^ ]+)."\
            "*/\\1/p'" % self.host_interface
        self.cidr = process.system_output(
            '%s' % cmd, shell=True)
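        # self.cidr now holds the interface's IPv4 address in CIDR form,
        # e.g. 10.0.0.5/24 (illustrative).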
        cmd = "route -n | grep %s | grep -w UG | awk "\
            "'{ print $2 }'" % self.host_interface
        self.gateway = process.system_output(
            '%s' % cmd, shell=True)
        cmd = "ip addr show %s | grep inet | grep brd | "\
            "awk '{ print $4 }'" % self.host_interface
        self.broadcast = process.system_output(
            '%s' % cmd, shell=True)
 def setUp(self):
     '''
     To check and install dependencies for the test
     '''
     smm = SoftwareManager()
     pkgs = ["net-tools"]
     detected_distro = distro.detect()
     if detected_distro.name == "Ubuntu":
         pkgs.extend(["openssh-client", "iputils-ping"])
     elif detected_distro.name == "SuSE":
         pkgs.extend(["openssh", "iputils"])
     else:
         pkgs.extend(["openssh-clients", "iputils"])
     for pkg in pkgs:
         if not smm.check_installed(pkg) and not smm.install(pkg):
             self.cancel("%s package is needed for the test" % pkg)
     interfaces = netifaces.interfaces()
     self.iface = self.params.get("interface")
     if self.iface not in interfaces:
         self.cancel("%s interface is not available" % self.iface)
     self.peer = self.params.get("peer_ip", default="")
     if self.peer == "":
         self.cancel("peer IP must be specified as an input parameter")
     self.user = self.params.get("user_name", default="root")
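     # The inet line of `ip addr show` ends with the device name, so grepping
     # for the peer IP and taking the last field yields the peer's interface.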
     msg = "ip addr show  | grep %s | grep -oE '[^ ]+$'" % self.peer
     cmd = "ssh %s@%s \"%s\"" % (self.user, self.peer, msg)
     self.peerif = process.system_output(cmd, shell=True).strip()
     if self.peerif == "":
         self.cancel("unable to get peer interface")
     cmd = "ip -f inet -o addr show %s | awk '{print $4}' | cut -d / -f1"\
           % self.iface
     self.local_ip = process.system_output(cmd, shell=True).strip()
     if self.local_ip == "":
         self.cancel("unable to get local ip")
Example #14
    def get_target_id(self):
        """
        Get target id from image name.
        """
        cmd = "targetcli ls /iscsi 1"
        target_info = process.system_output(cmd)
        target = None
        for line in re.split("\n", target_info)[1:]:
            if re.findall("o-\s\S+\s[\.]+\s\[TPGs:\s\d\]$", line):
                # eg: iqn.2015-05.com.example:iscsi.disk
                try:
                    target = re.findall("iqn[\.]\S+:\S+", line)[0]
                except IndexError:
                    logging.info("No target found in %s", line)
                    continue
            else:
                continue

            cmd = "targetcli ls /iscsi/%s/tpg1/luns" % target
            luns_info = process.system_output(cmd)
            for lun_line in re.split("\n", luns_info):
                if re.findall("o-\slun\d+", lun_line):
                    if self.emulated_image in lun_line:
                        break
                    else:
                        target = None
        return target
 def testinterface(self):
     '''
     Test the interface by bringing it down and up and verifying its state.
     '''
     if_down = "ip link set %s down" % self.iface
     if_up = "ip link set %s up" % self.iface
     # down the interface
     process.system(if_down, shell=True)
     # check the status of interface through ethtool
     ret = process.system_output(self.eth, shell=True)
     if 'yes' in ret:
         self.fail("interface test failed")
     # check the status of interface through ip link show
     ip_link = "ip link show %s | head -1" % self.iface
     ret = process.system_output(ip_link, shell=True)
     if 'UP' in ret:
         self.fail("interface test failed")
     # up the interface
     process.system(if_up, shell=True, ignore_status=True)
     self.log.info('Checking for interface status using ip link show')
     if not self.interface_wait(ip_link):
         self.fail("interface test failed")
     # check the status of interface through ethtool
     self.log.info('Checking for interface status using Ethtool')
     if not self.interface_wait(self.eth):
         self.fail("interface test failed")
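interface_wait is not defined in this snippet; a minimal sketch of such a poll-until-up helper, assuming the checked command reports 'UP' (ip link show) or 'yes' (ethtool) once the link is ready, and that the timeout values are illustrative:

 def interface_wait(self, cmd, timeout=30, step=2):
     # Hypothetical helper: poll `cmd` until its output reports the link as
     # up, or give up after `timeout` seconds.
     end = time.time() + timeout
     while time.time() < end:
         out = process.system_output(cmd, shell=True, ignore_status=True)
         if 'UP' in out or 'yes' in out:
             return True
         time.sleep(step)
     return False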
    def setUp(self):
        """
        Build 'nvme-cli' and setup the device.
        """
        self.device = self.params.get('device', default='/dev/nvme0')
        cmd = 'ls %s' % self.device
        if process.system(cmd, ignore_status=True) != 0:
            self.cancel("%s does not exist" % self.device)
        smm = SoftwareManager()
        if not smm.check_installed("nvme-cli") and not \
                smm.install("nvme-cli"):
            self.cancel('nvme-cli is needed for the test to be run')
        self.namespace = self.params.get('namespace', default='1')
        self.id_ns = "%sn%s" % (self.device, self.namespace)
        cmd = "nvme id-ns %s | grep 'in use' | awk '{print $5}' | \
            awk -F':' '{print $NF}'" % self.id_ns
        self.format_size = process.system_output(cmd, shell=True).strip('\n')
        self.format_size = pow(2, int(self.format_size))
        cmd = "nvme id-ns %s | grep 'in use' | awk '{print $2}'" % self.id_ns
        self.lba = process.system_output(cmd, shell=True).strip('\n')
        self.firmware_url = self.params.get('firmware_url', default='')
        if 'firmware_upgrade' in str(self.name) and not self.firmware_url:
            self.cancel("firmware url not given")

        test_dic = {'compare': 'Compare', 'formatnamespace': 'Format NVM',
                    'dsm': 'Data Set Management',
                    'writezeroes': 'Write Zeroes',
                    'firmware_upgrade': 'FW Commit and Download',
                    'writeuncorrectable': 'Write Uncorrectable'}
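        # Map test-name fragments to the optional NVMe features they exercise;
        # `nvme id-ctrl -H` reports whether the controller supports each one.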
        for key, value in test_dic.items():
            if key in str(self.name):
                cmd = "nvme id-ctrl %s -H" % self.id_ns
                if "%s Supported" % value not in \
                        process.system_output(cmd, shell=True):
                    self.cancel("%s is not supported" % value)
    def test_hostname(self):
        """
        i.Verifies hostname command options and
        ii.Test to change the hostname
        """
        output = process.system_output("hostname")
        if not output:
            self.fail("unexpected response from hostname command")

        # Verifying different options provided by hostname
        options_to_verify = self.params.get('hostname_opt', default="f")
        for option in options_to_verify:
            ret = process.run("hostname -%s" % option, ignore_status=True)
            if ret.exit_status:
                self.fail("Hostname reported non-zero status %s for option %s"
                          % (ret.exit_status, option))
            if not ret.stdout:
                self.fail("No output for %s option" % (option))

        # Test to change hostname
        myhostname_file = os.path.join(self.workdir, "MYHOSTNAME")
        myhostname = "myhost.my-domain"
        if myhostname == self.hostname:
            myhostname += '1'
        with open(myhostname_file, 'w') as fobj:
            fobj.write(myhostname)

        process.system("hostname -F %s" % myhostname_file,
                       sudo=True)
        self.restore_hostname = True
        if myhostname not in process.system_output("hostname",
                                                   env={"LANG": "C"}):
            self.fail("hostname -F %s did not set the hostname as expected"
                      % myhostname_file)
 def setUp(self):
     """
     Get the number of cores and threads per core
     Set the SMT value to 4/8
     """
     if 'ppc' not in platform.processor():
         self.cancel("Processor is not ppc64")
     self.nfail = 0
     self.CORES = process.system_output("lscpu | grep 'Core(s) per socket:'"
                                        "| awk '{print $4}'", shell=True)
     self.SOCKETS = process.system_output("lscpu | grep 'Socket(s):'"
                                          "| awk '{print $2}'", shell=True)
     self.THREADS = process.system_output("lscpu | grep 'Thread(s) per core"
                                          ":'| awk '{print $4}'",
                                          shell=True)
     self.T_CORES = int(self.CORES) * int(self.SOCKETS)
     self.log.info(" Cores = %s and threads = %s "
                   % (self.T_CORES, self.THREADS))
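     # raise the console log level so CPU online/offline messages show up in
     # dmesg while the test runs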
     process.system("echo 8 > /proc/sys/kernel/printk", shell=True,
                    ignore_status=True)
     self.max_smt = 4
     if cpu.get_cpu_arch().lower() == 'power8':
         self.max_smt = 8
     if cpu.get_cpu_arch().lower() == 'power6':
         self.max_smt = 2
     process.system_output("ppc64_cpu --smt=%s" % self.max_smt, shell=True)
     self.path = "/sys/devices/system/cpu"
    def test3_lscfg(self):
        self.log.info("===============Executing lscfg tool test============="
                      "==")
        self.run_cmd("lscfg")
        options = ['--debug', '--version', '-p']
        device = process.system_output('lscfg', shell=True).splitlines()[-1]
        if device.startswith("+"):
            options.append("-l%s" % device.split(" ")[1])

        for list_item in options:
            cmd = "lscfg %s" % list_item
            self.run_cmd(cmd)
        path_db = process.system_output("find /var/lib/lsvpd/ -iname vpd.db | "
                                        "head -1", shell=True).strip()
        if path_db:
            copyfile_path = os.path.join(self.outputdir, 'vpd.db')
            shutil.copyfile(path_db, copyfile_path)
            self.run_cmd("lscfg --data=%s" % copyfile_path)
        path_tar = process.system_output("find /var/lib/lsvpd/ -iname vpd.*.gz"
                                         " | head -1", shell=True).strip()
        if path_tar:
            self.run_cmd("lscfg --zip=%s" % path_tar)
        if self.is_fail >= 1:
            self.fail("%s command(s) failed in lscfg tool verification"
                      % self.is_fail)
Example #20
 def get_user_ugid(username):
     """
     Return the user's uid and gid (each as a list of strings)
     """
     user_uid = process.system_output("id -u %s" % username).split()
     user_gid = process.system_output("id -g %s" % username).split()
     return(user_uid, user_gid)
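Illustrative usage (the user name and ids are hypothetical):

uid, gid = get_user_ugid("testuser")   # e.g. uid == ['1001'], gid == ['1001']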
Example #21
 def test_sanitize(self):
     """
     Sanitize output (remove sensitive information like serial numbers, etc.)
     """
     out_with_sanitize = process.system_output("lshw -sanitize")
     for line in process.system_output("lshw").strip('\t\n\r').splitlines():
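         # any "serial:" line from the plain run that also appears verbatim in
         # the sanitized run means -sanitize failed to scrub it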
         if ("serial:" in line) and (line in out_with_sanitize):
             self.fail("Sensitive data is present in output")
 def tearDown(self):
     """
     Sets back SMT to original value as was before the test.
     Sets back cpu states to online
     """
     process.system_output(
         "ppc64_cpu --smt=off && ppc64_cpu --smt=on && ppc64_cpu --smt=%s" % self.curr_smt, shell=True)
     self.__online_cpus(totalcpus)
 def threads_per_core(self):
     """
     Tests the threads per core in ppc64_cpu command.
     """
     op1 = process.system_output(
         "ppc64_cpu --threads-per-core", shell=True).strip().split()[-1]
     op2 = process.system_output("ppc64_cpu --info", shell=True)
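     # `ppc64_cpu --info` prints one line per core, e.g. "Core   0: 0* 1* ...";
     # counting the CPU fields of its first line gives the threads per core.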
     op2 = len(op2.strip().splitlines()[0].split(":")[-1].split())
     self.equality_check("Threads per core", op1, op2)
 def setUp(self):
     self.tunnel = None
     ret = process.system_output("ps -aef", env={"LANG": "C"})
     if 'dhclient' in ret:
         self.cancel("Test not supported on systems running dhclient")
     install_dependencies()
     pre = process.system_output("iptunnel show")
     if "sit1" in pre:
         self.cancel("'sit1' already configured in iptunnel: %s" % pre)
Example #25
    def get_mlock_unevictable(mlock_cmd, unevictable_cmd):
        """
        Get nr_mlock and nr_unevictable in host

        :param mlock_cmd: CMD to get nr_mlock
        :param unevictable_cmd: CMD to get nr_unevictable
        """
        mlock = int(process.system_output(mlock_cmd).split().pop())
        unevictable = int(process.system_output(unevictable_cmd).split().pop())
        return mlock, unevictable
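Illustrative usage; the grep commands below are typical choices, not taken from this snippet:

mlock, unevictable = get_mlock_unevictable("grep nr_mlock /proc/vmstat",
                                           "grep nr_unevictable /proc/vmstat")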
Example #26
def glusterd_start():
    """
    Check for glusterd status and start it
    """
    cmd = "service glusterd status"
    output = decode_to_text(process.system_output(cmd, ignore_status=True))
    # The leading space in ' active' distinguishes it from 'inactive'
    if ' active' not in output or 'running' not in output:
        cmd = "service glusterd start"
        error_context.context("Starting gluster daemon failed")
        output = decode_to_text(process.system_output(cmd))
    def setUp(self):
        """
        Build 'HTX'.
        """
        if 'ppc64' not in process.system_output('uname -a', shell=True):
            self.cancel("Platform is not supported; ppc64 is required")

        if distro.detect().name != 'Ubuntu':
            self.cancel("Only Ubuntu is supported")

        self.mdt_file = self.params.get('mdt_file', default='mdt.hd')
        self.time_limit = int(self.params.get('time_limit', default=2)) * 3600
        self.block_devices = self.params.get('disk', default=None)
        if self.block_devices is None:
            self.cancel("Block devices are needed to run HTX")
        self.block_device = []
        for disk in self.block_devices.split():
            self.block_device.append(disk.rsplit("/")[-1])
        self.block_device = " ".join(self.block_device)

        packages = ['git', 'gcc', 'make', 'libncurses5', 'g++', 'libdapl-dev',
                    'ncurses-dev', 'libncurses-dev', 'libcxl-dev']
        smm = SoftwareManager()
        for pkg in packages:
            if not smm.check_installed(pkg) and not smm.install(pkg):
                self.cancel("Cannot install %s" % pkg)

        url = "https://github.com/open-power/HTX/archive/master.zip"
        tarball = self.fetch_asset("htx.zip", locations=[url], expire='7d')
        archive.extract(tarball, self.teststmpdir)
        htx_path = os.path.join(self.teststmpdir, "HTX-master")
        os.chdir(htx_path)

        build.run_make(htx_path, extra_args='all')
        build.run_make(htx_path, extra_args='deb')
        process.run('dpkg -r htxubuntu')
        process.run('dpkg --purge htxubuntu')
        process.run('dpkg -i htxubuntu.deb')
        if not os.path.exists("/usr/lpp/htx/mdt/%s" % self.mdt_file):
            self.cancel("MDT file %s not found" % self.mdt_file)
        self.smt = self.params.get('smt_change', default=False)
        if self.smt:
            self.max_smt_value = 8
            if cpu.get_cpu_arch().lower() == 'power7':
                self.max_smt_value = 4
            if cpu.get_cpu_arch().lower() == 'power6':
                self.max_smt_value = 2
            self.smt_values = ["off", "on"]
            for i in range(2, self.max_smt_value + 1):
                self.smt_values.append(str(i))
            self.curr_smt = process.system_output("ppc64_cpu --smt | awk -F'=' \
                '{print $NF}' | awk '{print $NF}'", shell=True)
Example #28
 def check_installed(self, name):
     if os.path.isfile(name):
         n_cmd = (self.lowlevel_base_cmd + ' -f ' + name +
                  ' Package 2>/dev/null')
         name = process.system_output(n_cmd)
     i_cmd = (self.lowlevel_base_cmd + " --show -f='${Status}' " +
              name + ' 2>/dev/null')
     # Checking if package is installed
     package_status = process.system_output(i_cmd, ignore_status=True)
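     # INSTALLED_OUTPUT is expected to be the dpkg Status string of a fully
     # installed package, i.e. "install ok installed"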
     dpkg_not_installed = (package_status != self.INSTALLED_OUTPUT)
     if dpkg_not_installed:
         return False
     return True
Example #29
    def setUp(self):
        """
        Build 'HTX'.
        """
        if 'ppc64' not in process.system_output('uname -a', shell=True):
            self.cancel("Supported only on Power Architecture")

        detected_distro = distro.detect()
        self.mdt_file = self.params.get('mdt_file', default='mdt.mem')
        self.time_limit = int(self.params.get('time_limit', default=2)) * 3600
        self.smt = self.params.get('smt_change', default=False)

        packages = ['git', 'gcc', 'make']
        if detected_distro.name in ['centos', 'fedora', 'rhel', 'redhat']:
            packages.extend(['gcc-c++', 'ncurses-devel',
                             'dapl-devel', 'libcxl-devel'])
        elif detected_distro.name == "Ubuntu":
            packages.extend(['libncurses5', 'g++', 'libdapl-dev',
                             'ncurses-dev', 'libncurses-dev', 'libcxl-dev'])
        else:
            self.cancel("Test not supported on %s" % detected_distro.name)

        smm = SoftwareManager()
        for pkg in packages:
            if not smm.check_installed(pkg) and not smm.install(pkg):
                self.cancel("Cannot install %s" % pkg)

        url = "https://github.com/open-power/HTX/archive/master.zip"
        tarball = self.fetch_asset("htx.zip", locations=[url], expire='7d')
        archive.extract(tarball, self.teststmpdir)
        htx_path = os.path.join(self.teststmpdir, "HTX-master")
        os.chdir(htx_path)

        build.make(htx_path, extra_args='all')
        build.make(htx_path, extra_args='tar')
        process.run('tar --touch -xvzf htx_package.tar.gz')
        os.chdir('htx_package')
        if process.system('./installer.sh -f'):
            self.fail("Installation of HTX failed; please refer to job.log")

        if self.smt:
            self.max_smt_value = 8
            if cpu.get_cpu_arch().lower() == 'power7':
                self.max_smt_value = 4
            if cpu.get_cpu_arch().lower() == 'power6':
                self.max_smt_value = 2
            self.smt_values = ["off", "on"]
            for i in range(2, self.max_smt_value + 1):
                self.smt_values.append(str(i))
            self.curr_smt = process.system_output("ppc64_cpu --smt | awk -F'=' \
                '{print $NF}' | awk '{print $NF}'", shell=True)
Example #30
    def test_lshw_verification(self):
        """
        Compare the output of lshw with other tools
        that produce similar hardware information.
        """
        # verifying mac address
        mac = process.system_output("ifconfig | grep 'ether' | "
                                    "head -1 | cut -d' ' -f10", shell=True).strip()
        if mac not in process.system_output("lshw"):
            self.fail("lshw failed to show correct mac address")

        # verify network
        if self.active_interface not in process.system_output("lshw -class network"):
            self.fail("lshw failed to show correct active network interface")
Example #31
 def test_basic_workflow(self):
     """
     Check the basic workflow works using ramdisk
     """
     ramdisk_filename = vg_ramdisk_dir = loop_device = None
     vg_name = "avocado_testing_vg_e5kj3erv11a"
     lv_name = "avocado_testing_lv_lk0ff33al5h"
     ramdisk_basedir = os.path.join(self.tmpdir, "foo", "bar")
     mount_loc = os.path.join(self.tmpdir, "lv_mount_location")
     os.mkdir(mount_loc)
     try:
         # Create ramdisk vg
         self.assertFalse(os.path.exists(ramdisk_basedir))
         self.assertFalse(lv_utils.vg_check(vg_name))
         spec = lv_utils.vg_ramdisk(False, vg_name, 10, ramdisk_basedir,
                                    "sparse_file")
         ramdisk_filename, vg_ramdisk_dir, vg_name, loop_device = spec
         # Check it was created properly
         self.assertTrue(ramdisk_filename)
         self.assertTrue(vg_ramdisk_dir)
         self.assertTrue(vg_name)
         self.assertTrue(loop_device)
         self.assertTrue(os.path.exists(ramdisk_basedir))
         self.assertTrue(glob.glob(os.path.join(ramdisk_basedir, "*")))
         self.assertTrue(lv_utils.vg_check(vg_name))
         vgs = lv_utils.vg_list()
         self.assertIn(vg_name, vgs)
         # Can't create existing vg
         self.assertRaises(lv_utils.LVException, lv_utils.vg_create,
                           vg_name, loop_device)
         # Create and check LV
         lv_utils.lv_create(vg_name, lv_name, 1)
         lv_utils.lv_check(vg_name, lv_name)
         self.assertIn(vg_name, process.system_output("lvs --all",
                                                      sudo=True))
         self.assertIn(lv_name, lv_utils.lv_list())
         lv_utils.lv_mount(vg_name, lv_name, mount_loc, "ext2")
         lv_utils.lv_umount(vg_name, lv_name)
         lv_utils.lv_remove(vg_name, lv_name)
         self.assertNotIn(lv_name, lv_utils.lv_list())
         # Cleanup ramdisk vgs
         lv_utils.vg_ramdisk_cleanup(ramdisk_filename, vg_ramdisk_dir,
                                     vg_name, loop_device)
         self.assertTrue(os.path.exists(ramdisk_basedir))
         self.assertFalse(glob.glob(os.path.join(ramdisk_basedir, "*")))
     except BaseException as details:
         try:
             process.run("mountpoint %s && umount %s"
                         % (mount_loc, mount_loc), shell=True, sudo=True)
         except BaseException as details:
             print("Failed to unmount LV: %s" % details)
         try:
             lv_utils.lv_remove(vg_name, lv_name)
         except BaseException as details:
             print("Failed to clean up LV: %s" % details)
         try:
             lv_utils.vg_ramdisk_cleanup(ramdisk_filename, vg_ramdisk_dir,
                                         vg_name, loop_device)
         except BaseException as details:
             print("Failed to clean up vg_ramdisk: %s" % details)
         raise
Example #32
def run(test, params, env):
    """
    Test qmp event notification. This case will:
    1) Start VM with qmp enable.
    2) Connect to qmp port then run qmp_capabilities command.
    3) Initiate the qmp command defined in config (qmp_cmd)
    4) Verify that qmp command works as designed.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with the test environment.
    """

    def check_list(qmp_o, key, val=None, check_item_in_pair=True):
        """
        Check if the expected key and val are contained in the QMP output qmp_o.

        :param qmp_o: output of QMP command
        :type qmp_o: list
        :param key: expected key
        :type key: str
        :param val: expected value
        :type val: str or None (if check_item_in_pair=False)
        :param check_item_in_pair: whether the expected result is a dict (True)
            or a plain string (False)
        :type check_item_in_pair: bool

        :return check result
        :rtype: bool
        """
        for element in qmp_o:
            if isinstance(element, dict):
                if _check_dict(element, key, val, check_item_in_pair):
                    return True
            elif isinstance(element, list):
                if check_list(element, key, val, check_item_in_pair):
                    return True
            elif element != '' and not check_item_in_pair:
                if strict_match:
                    if operator.eq(key, element):
                        return True
                else:
                    if key in str(element):
                        return True
        return False

    def _check_dict(dic, key, val, check_item_in_pair=True):
        """
        Check if the expected key and val are contained in the QMP output dic.

        :param dic: content of QMP command return value
        :type dic: dict
        :param key: expected key
        :type key: str
        :param val: expected value
        :type val: str or None (if check_item_in_pair=False)
        :param check_item_in_pair: whether the expected result is a dict (True)
            or a plain string (False)
        :type check_item_in_pair: bool

        :return check result
        :rtype: bool
        """
        if key in dic and not check_item_in_pair:
            return True
        elif key in dic and val == dic[key]:
            return True
        else:
            for value in dic.values():
                if isinstance(value, dict):
                    if _check_dict(value, key, val, check_item_in_pair):
                        return True
                elif isinstance(value, list):
                    if check_list(value, key, val, check_item_in_pair):
                        return True
                elif value != '' and not check_item_in_pair:
                    if strict_match:
                        if operator.eq(key, value):
                            return True
                    else:
                        if key in str(value):
                            return True
            return False

    def check_result(qmp_o, expect_o=None):
        """
        Check the test result in different ways, according to result_check:
        result_check = equal: expect_o should be equal to qmp_o.
        result_check = contain: expect_o should be contained in qmp_o.
        result_check = not_contain: expect_o should not be contained in qmp_o.

        :param qmp_o: output from pre_cmd, qmp_cmd or post_cmd.
        :type qmp_o: list
        :param expect_o: the expected result.
        :type expect_o: list
        """
        logging.info("Expected result is %s", expect_o)
        logging.info("Actual result from qmp_cmd/post_cmd is %s", qmp_o)
        if result_check == "equal":
            if not operator.eq(qmp_o, expect_o):
                test.fail("QMP output does not equal the expected result.\n"
                          "Expected result: '%s'\n"
                          "Actual result: '%s'" % (expect_o, qmp_o))
        elif result_check == "contain":
            for o in expect_o:
                if isinstance(o, dict):
                    for key, val in o.items():
                        result = check_list(qmp_o, key, val)
                        if not result:
                            break
                elif isinstance(o, str):
                    result = check_list(qmp_o, o, check_item_in_pair=False)

                if result:
                    logging.info("QMP output contains the expected value %s", o)
                else:
                    test.fail("QMP output does not contain the expected value.\n"
                              "Missing expected value: '%s'\n"
                              "Actual result: '%s'\n" % (o, qmp_o))
        elif result_check == "not_contain":
            for o in expect_o:
                if isinstance(o, dict):
                    for key, val in o.items():
                        result = check_list(qmp_o, key, val)
                        if result:
                            break
                elif isinstance(o, str):
                    result = check_list(qmp_o, o, check_item_in_pair=False)

                if result:
                    test.fail("QMP output contains an unexpected result.\n"
                              "Unexpected result: '%s'\n"
                              "Actual result: '%s'" % (o, qmp_o))

    qemu_binary = utils_misc.get_qemu_binary(params)
    if not utils_misc.qemu_has_option("qmp", qemu_binary):
        test.cancel("Host qemu does not support qmp.")

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))

    module = params.get("modprobe_module")
    if module:
        logging.info("modprobe the module %s", module)
        session.cmd("modprobe %s" % module)

    qmp_ports = vm.get_monitors_by_type('qmp')
    if qmp_ports:
        qmp_port = qmp_ports[0]
    else:
        test.error("Incorrect configuration, no QMP monitor found.")
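    # Dispatch table: run commands on the host shell, inside the guest over
    # the ssh session, or through the QMP monitor, depending on cmd_type.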
    callback = {"host_cmd": lambda cmd: process.system_output(cmd, shell=True).decode(),
                "guest_cmd": session.cmd_output,
                "qmp_cmd": qmp_port.send_args_cmd}

    def send_cmd(cmd):
        """ Helper to execute command on host/ssh guest/qmp monitor """
        if cmd_type in callback.keys():
            return callback[cmd_type](cmd)
        else:
            test.error("cmd_type is not supported")

    pre_cmd = params.get("pre_cmd")
    qmp_cmd = params.get("qmp_cmd")
    post_cmd = params.get("post_cmd")
    cmd_type = params.get("event_cmd_type")
    result_check = params.get("cmd_result_check")
    strict_match = params.get("strict_match", "yes") == 'yes'
    expect_o = eval(params.get("cmd_return_value", "[]"))
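    # cmd_return_value is a python-literal string from the test configuration,
    # e.g. "[{'name': 'vm1'}]" (illustrative).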

    # Pre command
    if pre_cmd is not None:
        logging.info("Run prepare command '%s'.", pre_cmd)
        pre_o = send_cmd(pre_cmd)
        logging.debug("Pre-command: '%s'\n Output: '%s'", pre_cmd, pre_o)

    # qmp command
    try:
        # Testing command
        logging.info("Run qmp command '%s'.", qmp_cmd)
        qmp_o = qmp_port.send_args_cmd(qmp_cmd)
        if not isinstance(qmp_o, list):
            qmp_o = [qmp_o]
        logging.debug("QMP command: '%s' \n Output: '%s'", qmp_cmd, qmp_o)
    except qemu_monitor.QMPCmdError as err:
        if params.get("negative_test") == 'yes':
            logging.debug("Negative QMP command: '%s'\n output:'%s'", qmp_cmd,
                          err)
            if params.get("negative_check_pattern"):
                check_pattern = params.get("negative_check_pattern")
                if check_pattern not in str(err):
                    test.fail("'%s' not in exception '%s'"
                              % (check_pattern, err))
        else:
            test.fail(err)
    except qemu_monitor.MonitorProtocolError as err:
        test.fail(err)
    except Exception as err:
        test.fail(err)

    # sleep 10s to make netdev_del take effect
    if 'netdev_del' in qmp_cmd:
        time.sleep(10)

    # Post command
    if post_cmd is not None:
        logging.info("Run post command '%s'.", post_cmd)
        post_o = send_cmd(post_cmd)
        if not isinstance(post_o, list):
            post_o = [post_o]
        logging.debug("Post-command: '%s'\n Output: '%s'", post_cmd, post_o)

    if result_check == "equal" or result_check == "contain":
        logging.info("Verify qmp command '%s' works as designed.", qmp_cmd)
        if qmp_cmd == "query-name":
            vm_name = params["main_vm"]
            expect_o = [{'name': vm_name}]
        elif qmp_cmd == "query-uuid":
            uuid_input = params["uuid"]
            expect_o = [{'UUID': uuid_input}]
        elif qmp_cmd == "query-version":
            qemu_version_cmd = "rpm -qa | grep -E 'qemu-kvm(-(rhev|ma))?-[0-9]' | head -n 1"
            host_arch = platform.machine()
            qemu_version = callback["host_cmd"](qemu_version_cmd).replace('.%s' % host_arch, '')
            expect_o = [str(qemu_version)]
        elif qmp_cmd == "query-block":
            images = params['images'].split()
            image_info = {}
            for image in images:
                image_params = params.object_params(image)
                image_format = image_params['image_format']
                image_drive = "drive_%s" % image
                if vm.check_capability(Flags.BLOCKDEV):
                    image_info['node-name'] = image_drive
                else:
                    image_info['device'] = image_drive
                image_info['qdev'] = image
                image_info['format'] = image_format
                expect_o.append(image_info)
        elif qmp_cmd == "query-target":
            host_arch = platform.machine()
            if host_arch == "ppc64le":
                host_arch = host_arch[:5]
            expect_o = [{"arch": host_arch}]
        elif qmp_cmd == "query-machines":
            # Remove avocado machine type
            vm_machines = params["machine_type"].split(':', 1)[-1]
            expect_o = [{'alias': vm_machines}]
        elif qmp_cmd == "query-vnc":
            vnc_port = vm.get_vnc_port()
            expect_o = [{'service': str(vnc_port)}, {'enabled': True}, {'host': '0.0.0.0'}]
        check_result(qmp_o, expect_o)
    elif result_check.startswith("post_"):
        logging.info("Verify post qmp command '%s' works as designed.", post_cmd)
        result_check = result_check.split('_', 1)[1]
        check_result(post_o, expect_o)
    session.close()
Example #33
def result_sum(topdir, params, guest_ver, resultsdir, test):
    case_type = params.get("test")
    unit_std = params.get("unit_std", "M")
    no_table_list = params.get("no_table_list", "").split()
    ignore_cases = params.get("ignore_cases", "").split()
    repeatn = ""
    if "repeat" in test.outputdir:
        repeatn = re.findall(r"repeat\d+", test.outputdir)[0]
    category_key = re.split("/", test.outputdir)[-1]
    category_key = re.split(case_type, category_key)[0]
    category_key = re.sub(r"\.repeat\d+", "", category_key)

    kvm_ver = process.system_output(params.get('ver_cmd', "rpm -q qemu-kvm"),
                                    shell=True)
    host_ver = os.uname()[2]
    test.write_test_keyval({'kvm-userspace-ver': kvm_ver})
    test.write_test_keyval({'host-kernel-ver': host_ver})
    test.write_test_keyval({'guest-kernel-ver': guest_ver})
    # Find the results files

    results_files = {}
    file_list = [
        'guest_result', 'guest_monitor_result.*sum', 'host_monitor_result.*sum'
    ]
    if params.get("file_list"):
        file_list = params.get("file_list").split()

    for files in os.walk(topdir):
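        # results_files maps a "<case>--<subcase>--..." prefix to a list with
        # one slot per pattern in file_list (result file, guest/host mpstat).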
        if files[2]:
            for file in files[2]:
                jump_flag = False
                for ignore_case in ignore_cases:
                    if ignore_case in files[0]:
                        jump_flag = True
                if jump_flag:
                    continue
                file_dir_norpt = re.sub(r"\.repeat\d+", "", files[0])
                if (repeatn in files[0] and category_key in file_dir_norpt
                        and case_type in files[0]):
                    for i, pattern in enumerate(file_list):
                        if re.findall(pattern, file):
                            prefix = re.findall(r"%s\.[\d\w_\.]+" % case_type,
                                                file_dir_norpt)[0]
                            prefix = re.sub(r"\.|_", "--", prefix)
                            if prefix not in results_files.keys():
                                results_files[prefix] = []
                                tmp = []
                                for j in range(len(file_list)):
                                    tmp.append(None)
                                results_files[prefix] = tmp
                            tmp_file = utils_misc.get_path(files[0], file)
                            results_files[prefix][i] = tmp_file

    # Start to read results from results file and monitor file
    results_matrix = {}
    no_table_results = {}
    thread_tag = params.get("thread_tag", "thread")
    order_list = []
    for prefix in results_files:
        marks = params.get("marks", "").split()
        case_infos = prefix.split("--")
        case_type = case_infos[0]
        threads = ""
        refresh_order_list = True
        prefix_perf = prefix
        if case_type == "ffsb":
            category = "-".join(case_infos[:-1])
            threads = case_infos[-1]
        elif case_type == "qcow2perf":
            refresh_order_list = False
            if len(case_infos) > 2:
                category = "-".join(case_infos[:-2])
                thread_tag = case_infos[-2]
                threads = " "
                marks[0] = re.sub("TIME", case_infos[-1], marks[0])
            else:
                category = case_infos[-1]
                marks[0] = re.sub("TIME", case_infos[-1], marks[0])
            prefix_perf = "--".join(case_infos[:-1])
        else:
            category = "-".join(case_infos)
        if refresh_order_list:
            order_list = []
        if (category not in results_matrix.keys()
                and category not in no_table_list):
            results_matrix[category] = {}
        if threads:
            if threads not in results_matrix[category].keys():
                results_matrix[category][threads] = {}
                results_matrix["thread_tag"] = thread_tag
            tmp_dic = results_matrix[category][threads]
        elif category not in no_table_list:
            tmp_dic = results_matrix[category]

        result_context_file = open(results_files[prefix][0], 'r')
        result_context = result_context_file.read()
        result_context_file.close()
        for mark in marks:
            mark_tag, mark_key = mark.split(":")
            datas = re.findall(mark_key, result_context)
            if isinstance(datas[0], tuple):
                data = time_ana(datas[0])
            else:
                tmp_data = 0.0
                for data in datas:
                    if re.findall("[bmkg]", data, re.I):
                        data = utils_misc.normalize_data_size(data, unit_std)
                    tmp_data += float(data)
                data = str(tmp_data)
            if data:
                if mark_tag in no_table_list:
                    no_table_results[mark_tag] = utils_misc.aton(data)
                    perf_value = no_table_results[mark_tag]
                else:
                    tmp_dic[mark_tag] = utils_misc.aton(data)
                    perf_value = tmp_dic[mark_tag]
            else:
                test.error("Cannot get the right data from the result. "
                           "Please check the debug file.")
            if mark_tag not in no_table_list and mark_tag not in order_list:
                order_list.append(mark_tag)
            test.write_perf_keyval(
                {'%s-%s' % (prefix_perf, mark_tag): perf_value})
        # start analyze the mpstat results
        if params.get('mpstat') == "yes":
            guest_cpu_infos = mpstat_ana(results_files[prefix][1])
            for vcpu in guest_cpu_infos:
                if vcpu != "all":
                    tmp_dic[vcpu] = float(guest_cpu_infos[vcpu])
                    order_list.append(vcpu)
            host_cpu_infos = mpstat_ana(results_files[prefix][2])
            tmp_dic["Hostcpu"] = float(host_cpu_infos["all"])
            order_list.append("Hostcpu")
        # Add some special key for cases
        if case_type == "ffsb":
            tmp_dic["MBps_per_Hostcpu"] = (tmp_dic["Thro-MBps"] /
                                           tmp_dic["Hostcpu"])
            order_list.append("MBps_per_Hostcpu")
        elif case_type == "iozone":
            sum_kbps = 0
            for mark in marks:
                mark_tag, _ = mark.split(":")
                sum_kbps += tmp_dic[mark_tag]
            tmp_dic["SUMKbps_per_Hostcpu"] = sum_kbps / tmp_dic["Hostcpu"]
            order_list.append("SUMKbps_per_Hostcpu")

    sum_marks = params.get("sum_marks", "").split()
    sum_matrix = {}
    order_line = ""
    if results_matrix.get("thread_tag"):
        headline = "%20s|" % results_matrix["thread_tag"]
        results_matrix.pop("thread_tag")
    else:
        headline = ""
    for index, tag in enumerate(order_list):
        headline += "%s|" % format_result(tag)
        order_line += "DATA%d|" % index
    headline = headline.rstrip("|")
    order_line = order_line.rstrip("|")

    result_path = utils_misc.get_path(resultsdir, "%s-result.RHS" % case_type)
    if os.path.isfile(result_path):
        result_file = open(result_path, "r+")
    else:
        result_file = open(result_path, "w")
        result_file.write("### kvm-userspace-version : %s\n" % kvm_ver)
        result_file.write("### kvm-version : %s\n" % host_ver)
        result_file.write("### guest-kernel-version :%s\n" % guest_ver)

    test.write_test_keyval({'category': headline})
    result_file.write("Category:ALL\n")
    matrix_order = params.get("matrix_order", "").split()
    if not matrix_order:
        matrix_order = results_matrix.keys()
        matrix_order.sort()
    for category in matrix_order:
        out_loop_line = order_line
        result_file.write("%s\n" % category)
        line = ""
        write_out_loop = True
        result_file.write("%s\n" % headline)
        for item in results_matrix[category]:
            if isinstance(results_matrix[category][item], dict):
                tmp_dic = results_matrix[category][item]
                line = "%s|" % format_result(item)
                for tag in order_list:
                    line += "%s|" % format_result(tmp_dic[tag])
                    if tag in sum_marks:
                        sum_matrix = get_sum_result(sum_matrix, tmp_dic[tag],
                                                    tag)
                result_file.write("%s\n" % line.rstrip("|"))
                write_out_loop = False
            else:
                #line += "%s|" % format_result(results_matrix[category][item])
                re_data = "DATA%s" % order_list.index(item)
                out_loop_line = re.sub(
                    re_data, format_result(results_matrix[category][item]),
                    out_loop_line)
                if item in sum_marks:
                    sum_matrix = get_sum_result(
                        sum_matrix, results_matrix[category][item], item)
        if write_out_loop:
            result_file.write("%s\n" % out_loop_line)

    if sum_matrix:
        if case_type == "ffsb":
            sum_matrix["MBps_per_Hostcpu"] = (sum_matrix["Thro-MBps"] /
                                              sum_matrix["Hostcpu"])
            sum_marks.append("MBps_per_Hostcpu")
        result_file.write("Category:SUM\n")
        headline = ""
        line = ""
        if len(sum_matrix) < 4:
            for i in range(4 - len(sum_matrix)):
                headline += "%20s|" % "None"
                line += "%20d|" % 0
        for tag in sum_marks:
            headline += "%20s|" % tag
            line += "%s|" % format_result(sum_matrix[tag])

        result_file.write("%s\n" % headline.rstrip("|"))
        result_file.write("%s\n" % line.rstrip("|"))

    if no_table_results:
        no_table_order = params.get("no_table_order", "").split()
        if not no_table_order:
            no_table_order = no_table_results.keys()
            no_table_order.sort()
        for item in no_table_order:
            result_file.write("%s: %s\n" % (item, no_table_results[item]))

    result_file.close()
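
The helper get_sum_result is called above but not shown in this excerpt. As a rough sketch only, a per-tag accumulator compatible with those calls could look like this (the real helper may differ):

def get_sum_result_sketch(sum_matrix, value, tag):
    """Add `value` to the running sum kept under `tag` (illustrative only)."""
    sum_matrix[tag] = sum_matrix.get(tag, 0) + float(value)
    return sum_matrix

totals = {}
totals = get_sum_result_sketch(totals, 10.5, "Thro-MBps")
totals = get_sum_result_sketch(totals, 4.5, "Thro-MBps")
print(totals)  # {'Thro-MBps': 15.0}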
Example #34
0
    def test_connect(self):
        """
        Test connecting to a pool.

        :avocado: tags=pool,poolconnect,quick
        """

        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []

        setidlist = self.params.get("setname", '/run/tests/setnames/*')
        setid = setidlist[0]
        expected_for_param.append(setidlist[1])

        # if any parameter results in failure then the test should FAIL
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            uid = os.geteuid()
            gid = os.getegid()

            # TODO make these params in the yaml
            daosctl = self.basepath + '/install/bin/daosctl'

            host1 = self.hostlist_servers[0]
            host2 = self.hostlist_servers[1]

            create_cmd = (
                "{0} create-pool "
                "-m {1} "
                "-u {2} "
                "-g {3} "
                "-s {4} "
                "-c 1".format(daosctl, "0731", uid, gid, setid))
            uuid_str = """{0}""".format(process.system_output(create_cmd))
            print("uuid is {0}\n".format(uuid_str))

            exists = check_for_pool.check_for_pool(host1, uuid_str)
            if exists != 0:
                self.fail("Pool {0} not found on host {1}.\n".
                          format(uuid_str, host1))
            exists = check_for_pool.check_for_pool(host2, uuid_str)
            if exists != 0:
                self.fail("Pool {0} not found on host {1}.\n".
                          format(uuid_str, host2))

            connect_cmd = ('{0} connect-pool -i {1} '
                           '-s {2} -r -l 0,1'.format(daosctl,
                                                     uuid_str, setid))
            process.system(connect_cmd)


            if expected_result == 'FAIL':
                self.fail("Expected to fail but passed.\n")

        except Exception as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Expecting to pass but test has failed.\n")
Example #35
0
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create a image to attached to VM.
    (3).Attach disk.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {
        'type': sec_type,
        'model': sec_model,
        'label': sec_label,
        'relabel': sec_relabel
    }
    disk_seclabel = params.get("disk_seclabel", "no")
    # Get variables about pool vol
    with_pool_vol = 'yes' == params.get("with_pool_vol", "no")
    check_cap_rawio = "yes" == params.get("check_cap_rawio", "no")
    virt_use_nfs = params.get("virt_use_nfs", "off")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format", "qcow2")
    device_target = params.get("disk_target")
    device_bus = params.get("disk_target_bus")
    device_type = params.get("device_type", "file")
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get varialbles about image.
    img_label = params.get('svirt_attach_disk_disk_label')
    sec_disk_dict = {
        'model': sec_model,
        'label': img_label,
        'relabel': sec_relabel
    }
    enable_namespace = 'yes' == params.get('enable_namespace', 'no')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in list(disks.values()):
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    disk_xml = Disk(type_name=device_type)
    disk_xml.device = "disk"
    try:
        # set qemu conf
        if check_cap_rawio:
            qemu_conf.user = 'root'
            qemu_conf.group = 'root'
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        if with_pool_vol:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            logging.debug("pool_type %s" % pool_type)
            pvt.pre_pool(pool_name,
                         pool_type,
                         pool_target,
                         emulated_image,
                         image_size="1G",
                         pre_disk_vol=["20M"])

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes
                # through libvirt; logical pools do, but the volume format
                # is not supported there and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = list(pv.list_volumes().keys())
                vol_format = "raw"
                if vols:
                    vol_name = vols[0]
                else:
                    test.cancel("No volume in pool: %s" % pool_name)
            else:
                vol_arg = {
                    'name': vol_name,
                    'format': vol_format,
                    'capacity': 1073741824,
                    'allocation': 1048576,
                }
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name,
                                              vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    test.cancel("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                test.cancel("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["iscsi", "disk"]:
                source_type = "dev"
                if pool_type == "iscsi":
                    disk_xml.device = "lun"
                    disk_xml.rawio = "yes"
                else:
                    if not enable_namespace:
                        qemu_conf.namespaces = ''
                        logging.debug("the qemu.conf content is: %s" %
                                      qemu_conf)
                        libvirtd.restart()
            else:
                source_type = "file"

            # set host_sestatus as nfs pool will reset it
            utils_selinux.set_status(host_sestatus)
            # set virt_use_nfs
            result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs,
                                 shell=True)
            if result.exit_status:
                test.cancel("Failed to set virt_use_nfs value")
        else:
            source_type = "file"
            # Init a QemuImg instance.
            params['image_name'] = img_name
            tmp_dir = data_dir.get_tmp_dir()
            image = qemu_storage.QemuImg(params, tmp_dir, img_name)
            # Create a image.
            img_path, result = image.create(params)
            # Set the context of the image.
            if sec_relabel == "no":
                utils_selinux.set_context_of_file(filename=img_path,
                                                  context=img_label)

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        disk_xml.driver = {"name": "qemu", "type": vol_format}
        if disk_seclabel == "yes":
            source_seclabel = []
            sec_xml = seclabel.Seclabel()
            sec_xml.update(sec_disk_dict)
            source_seclabel.append(sec_xml)
            disk_source = disk_xml.new_disk_source(**{
                "attrs": {
                    source_type: img_path
                },
                "seclabels": source_seclabel
            })
        else:
            disk_source = disk_xml.new_disk_source(
                **{"attrs": {
                    source_type: img_path
                }})
            # Set the context of the VM.
            vmxml.set_seclabel([sec_dict])
            vmxml.sync()

        disk_xml.source = disk_source
        logging.debug(disk_xml)

        # Do the attach action.
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml.xml,
                                         flagstr='--persistent')
        libvirt.check_exit_status(cmd_result, expect_error=False)
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                test.fail('Test succeeded in negative case.')

            if check_cap_rawio:
                cap_list = ['CapPrm', 'CapEff', 'CapBnd']
                cap_dict = {}
                pid = vm.get_pid()
                pid_status_path = "/proc/%s/status" % pid
                with open(pid_status_path) as f:
                    for line in f:
                        val_list = line.split(":")
                        if val_list[0] in cap_list:
                            cap_dict[val_list[0]] = int(
                                val_list[1].strip(), 16)

                # Bitwise AND with the rawio capability value to check that
                # cap_sys_rawio is set
                cap_rawio_val = 0x0000000000020000
                for i in cap_list:
                    if not cap_rawio_val & cap_dict[i]:
                        err_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        err_msg += " lack cap_sys_rawio capabilities"
                        test.fail(err_msg)
                    else:
                        inf_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        inf_msg += " have cap_sys_rawio capabilities"
                        logging.debug(inf_msg)
            if pool_type == "disk":
                if libvirt_version.version_compare(3, 1,
                                                   0) and enable_namespace:
                    vm_pid = vm.get_pid()
                    output = process.system_output(
                        "nsenter -t %d -m -- ls -Z %s" % (vm_pid, img_path))
                else:
                    output = process.system_output('ls -Z %s' % img_path)
                logging.debug("The default label is %s", default_label)
                logging.debug("The label after guest started is %s",
                              to_text(output.strip().split()[-2]))
                if default_label not in to_text(output.strip().split()[-2]):
                    test.fail("The label is wrong after guest started\n")
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                test.fail("Test failed in positive case." "error: %s" % e)

        cmd_result = virsh.detach_device(domainarg=vm_name,
                                         filearg=disk_xml.xml)
        libvirt.check_exit_status(cmd_result, status_error)
    finally:
        # clean up
        vm.destroy()
        if not with_pool_vol:
            image.remove()
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except exceptions.TestFail as detail:
                logging.error(str(detail))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if check_cap_rawio:
            qemu_conf.restore()
            libvirtd.restart()
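
The check_cap_rawio branch above reads the capability masks from /proc/<pid>/status and tests one bit. A small standalone sketch of that parsing, assuming the same 0x20000 (cap_sys_rawio) bit value used in the test:

import os

CAP_SYS_RAWIO = 0x0000000000020000  # bit 17, same value used above

def has_cap_sys_rawio(pid):
    """Return True if CapPrm, CapEff and CapBnd of `pid` all include cap_sys_rawio."""
    caps = {}
    with open("/proc/%s/status" % pid) as status:
        for line in status:
            key, _, value = line.partition(":")
            if key in ("CapPrm", "CapEff", "CapBnd"):
                caps[key] = int(value.strip(), 16)
    return bool(caps) and all(mask & CAP_SYS_RAWIO for mask in caps.values())

# Illustrative usage: check the current process.
print(has_cap_sys_rawio(os.getpid()))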
Example #36
0
 def run_cmd_out(cmd):
     return process.system_output(cmd,
                                  shell=True,
                                  ignore_status=True,
                                  sudo=True).strip()
Example #37
0
def get_image_filename_filesytem(params, root_dir):
    """
    Generate an image path from params and root_dir.

    :param params: Dictionary containing the test parameters.
    :param root_dir: Base directory for relative filenames.

    :note: params should contain:
           image_name -- the name of the image file, without extension
           image_format -- the format of the image (qcow2, raw etc)
    :raise VMDeviceError: When no matching disk found (in indirect method).
    """
    def sort_cmp(first, second):
        """
        Comparison helper used for sorting in this test: sort by length
        first, then by value.
        """
        first_contains_digit = re.findall(r'[vhs]d[a-z]*[\d]+', first)
        second_contains_digit = re.findall(r'[vhs]d[a-z]*[\d]+', second)

        if not first_contains_digit and not second_contains_digit:
            if len(first) > len(second):
                return 1
            elif len(first) < len(second):
                return -1
        if len(first) == len(second):
            if first_contains_digit and second_contains_digit:
                return cmp(first, second)
            elif first_contains_digit:
                return -1
            elif second_contains_digit:
                return 1
        return cmp(first, second)

    image_name = params.get("image_name", "image")
    indirect_image_select = params.get("indirect_image_select")
    if indirect_image_select:
        re_name = image_name
        indirect_image_select = int(indirect_image_select)
        matching_images = process.system_output("ls -1d %s" % re_name)
        matching_images = sorted(matching_images.split('\n'), cmp=sort_cmp)
        if matching_images[-1] == '':
            matching_images = matching_images[:-1]
        try:
            image_name = matching_images[indirect_image_select]
        except IndexError:
            raise virt_vm.VMDeviceError(
                "No matching disk found for "
                "name = '%s', matching = '%s' and "
                "selector = '%s'" %
                (re_name, matching_images, indirect_image_select))
        for protected in params.get('indirect_image_blacklist', '').split(' '):
            match_image = re.match(protected, image_name)
            if match_image and match_image.group(0) == image_name:
                # Only raise an error on an exact match (e.g. sda or sda1);
                # a longer name such as sdaa should not raise an error.
                raise virt_vm.VMDeviceError(
                    "Matching disk is in blacklist. "
                    "name = '%s', matching = '%s' and "
                    "selector = '%s'" %
                    (re_name, matching_images, indirect_image_select))

    image_format = params.get("image_format", "qcow2")
    if params.get("image_raw_device") == "yes":
        return image_name
    if image_format:
        image_filename = "%s.%s" % (image_name, image_format)
    else:
        image_filename = image_name

    image_filename = utils_misc.get_path(root_dir, image_filename)
    return image_filename
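
For the direct (non-indirect) path of the function above, the lookup reduces to joining image_name, image_format and root_dir. A minimal illustration of that reduced behaviour (values are made up, and os.path.join stands in for utils_misc.get_path):

import os

def simple_image_filename(params, root_dir):
    """Reduced form of the lookup above for the non-indirect case (sketch only)."""
    image_name = params.get("image_name", "image")
    image_format = params.get("image_format", "qcow2")
    if params.get("image_raw_device") == "yes":
        return image_name
    image_filename = "%s.%s" % (image_name, image_format) if image_format else image_name
    return os.path.join(root_dir, image_filename)

print(simple_image_filename({"image_name": "rhel8", "image_format": "qcow2"},
                            "/var/lib/avocado/images"))
# -> /var/lib/avocado/images/rhel8.qcow2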
Example #38
0
def run(test, params, env):
    """
    Ping the guest with different size of packets.

    1) Login to guest
    2) Ping test on nic(s) from host - default_ping/multi_nics
        2.1) Ping with packet size from 0 to 65507
        2.2) Flood ping test
        2.3) Ping test after flood ping, Check if the network is still alive
    3) Ping test from guest side to external host - ext_host
        3.1) Ping with packet size from 0 to 65507 (win guest is up to 65500)
        3.2) Flood ping test
        3.3) Ping test after flood ping, Check if the network is still alive

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    counts = params.get("ping_counts", 30)
    packet_sizes = params.get("packet_size", "").split()
    interval_times = params.get("interval_time", "1").split()
    timeout = int(params.get("login_timeout", 360))
    ping_ext_host = params.get("ping_ext_host", "no") == "yes"
    pre_cmd = params.get("pre_cmd", None)
    vm = env.get_vm(params["main_vm"])

    error_context.context("Login to guest", logging.info)
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    # get the test ip, interface & session
    dest_ips = []
    sessions = []
    interfaces = []
    if ping_ext_host:
        ext_host = params.get("ext_host", "")
        ext_host_get_cmd = params.get("ext_host_get_cmd", "")
        try:
            ext_host = process.system_output(ext_host_get_cmd, shell=True)
        except process.CmdError:
            logging.warn("Can't get specified host with cmd '%s',"
                         " Fallback to default host '%s'",
                         ext_host_get_cmd, ext_host)
        dest_ips = [ext_host]
        sessions = [session]
        interfaces = [None]
    else:
        # Most Linux distributions do not add IP configuration for extra
        # NICs, so get the IPs for extra NICs via pre_cmd.
        if pre_cmd:
            session.cmd(pre_cmd, timeout=600)
        for i, nic in enumerate(vm.virtnet):
            ip = vm.get_address(i)
            if ip.upper().startswith("FE80"):
                interface = utils_net.get_neigh_attch_interface(ip)
            else:
                interface = None
            nic_name = nic.get("nic_name")
            if not ip:
                test.fail("Could not get the ip of nic index %d: %s",
                          i, nic_name)
            dest_ips.append(ip)
            sessions.append(None)
            interfaces.append(interface)

    for (ip, interface, session) in zip(dest_ips, interfaces, sessions):
        error_context.context("Ping test with dest: %s" % ip, logging.info)

        # ping with different size & interval
        for size in packet_sizes:
            for interval in interval_times:
                logging.info("Ping with packet size: %s and interval: %s" %
                             (size, interval))
                _ping_with_params(test, params, ip, interface, size,
                                  interval, session=session, count=counts)

        # ping with flood
        if not ping_ext_host or params.get("os_type") == "linux":
            error_context.context("Flood ping test", logging.info)
            _ping_with_params(test, params, ip, interface,
                              session=session, flood=True)

            # ping to check whether the network is alive
            error_context.context("Ping test after flood ping,"
                                  " Check if the network is still alive",
                                  logging.info)
            _ping_with_params(test, params, ip, interface,
                              session=session, count=counts)
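
_ping_with_params is not part of this snippet. As a rough, host-side sketch only (the helper name, options and defaults below are assumptions, not the real utility), a ping wrapper built on the same process API could look like:

from avocado.utils import process

def ping_once(dest, count=30, size=None, interval=None, flood=False):
    """Run one ping invocation and report whether it exited cleanly (sketch only)."""
    cmd = "ping -c %d" % count
    if flood:
        cmd += " -f"
    elif interval is not None:
        cmd += " -i %s" % interval
    if size is not None:
        cmd += " -s %s" % size
    cmd += " %s" % dest
    result = process.run(cmd, shell=True, ignore_status=True)
    return result.exit_status == 0

print(ping_once("127.0.0.1", count=3, size=56, interval=0.2))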
Example #39
0
    def setUp(self):
        '''
        set up required packages and gather necessary test inputs.
        Test all services.
        '''
        smm = SoftwareManager()
        detected_distro = distro.detect()
        self.log.info("Test is running on: %s", detected_distro.name)
        if not smm.check_installed("ksh") and not smm.install("ksh"):
            self.cancel('ksh is needed for the test to be run')
        if detected_distro.name == "Ubuntu":
            if not smm.check_installed("python-paramiko") and not \
                                      smm.install("python-paramiko"):
                self.cancel('python-paramiko is needed for the test to be run')
            ubuntu_url = self.params.get('ubuntu_url', default=None)
            debs = self.params.get('debs', default=None)
            for deb in debs:
                deb_url = os.path.join(ubuntu_url, deb)
                deb_install = self.fetch_asset(deb_url, expire='7d')
                shutil.copy(deb_install, self.workdir)
                process.system("dpkg -i %s/%s" % (self.workdir, deb),
                               ignore_status=True,
                               sudo=True)
        else:
            url = self.params.get('url', default=None)
            rpm_install = self.fetch_asset(url, expire='7d')
            shutil.copy(rpm_install, self.workdir)
            os.chdir(self.workdir)
            process.run('chmod +x ibmtools')
            process.run('./ibmtools --install --managed')
        try:
            process.run("startsrc -g rsct", shell=True, sudo=True)
        except CmdError as details:
            self.log.debug(str(details))
            self.cancel("Command startsrc -g rsct failed")

        try:
            process.run("startsrc -g rsct_rm", shell=True, sudo=True)
        except CmdError as details:
            self.log.debug(str(details))
            self.cancel("Command startsrc -g rsct_rm failed")

        output = process.system_output("lssrc -a",
                                       ignore_status=True,
                                       shell=True,
                                       sudo=True)
        if "inoperative" in output:
            self.cancel("Failed to start the rsct and rsct_rm services")

        self.hmc_ip = self.params.get("hmc_ip", '*', default=None)
        self.hmc_pwd = self.params.get("hmc_pwd", '*', default=None)
        self.hmc_username = self.params.get("hmc_username", '*', default=None)
        self.lpar_1 = self.params.get("lpar_1", '*', default=None)
        self.lpar_2 = self.params.get("lpar_2", '*', default=None)
        self.pci_device = self.params.get("pci_device", '*', default=None)
        self.server = self.params.get("server", '*', default=None)
        self.loc_code = pci.get_slot_from_sysfs(self.pci_device)
        self.num_of_dlpar = int(self.params.get("num_of_dlpar", default='1'))
        if self.loc_code is None:
            self.cancel("Failed to get the location code for the pci device")
        self.login(self.hmc_ip, self.hmc_username, self.hmc_pwd)
        self.run_command("uname -a")
        cmd = 'lshwres -r io -m %s --rsubtype slot --filter lpar_names=%s ' \
              '-F drc_index,lpar_id,drc_name,bus_id' % (self.server,
                                                        self.lpar_1)
        output = self.run_command(cmd)
        for line in output:
            if self.loc_code in line:
                self.drc_index = line.split(',')[0]
                self.lpar_id = line.split(',')[1]
                self.phb = line.split(',')[3]
                break

        self.log.info("lpar_id : %s, loc_code: %s, drc_index: %s, phb: %s",
                      self.lpar_id, self.loc_code, self.drc_index, self.phb)
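
The loop above picks drc_index, lpar_id and bus_id out of a comma-separated lshwres line. A small standalone sketch of that parsing (the sample record below is invented):

def parse_slot_line(line):
    """Split one 'drc_index,lpar_id,drc_name,bus_id' record from lshwres (sketch)."""
    drc_index, lpar_id, drc_name, bus_id = line.strip().split(',')
    return {"drc_index": drc_index, "lpar_id": lpar_id,
            "drc_name": drc_name, "phb": bus_id}

sample = "21010208,2,U78CB.001.WZS0001-P1-C9,2"  # invented example record
print(parse_slot_line(sample))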
Example #40
0
 def test(self):
     args = []
     cmdline = ''
     timeout = ''
     if not (self.stressors or self.v_stressors):
         if 'all' in self.class_type:
             args.append('--all %s ' % self.workers)
         elif 'cpu' in self.class_type:
             self.workers = 2 * multiprocessing.cpu_count()
             args.append('--cpu %s --cpu-method all ' % self.workers)
         else:
             args.append('--class %s --sequential %s ' %
                         (self.class_type, self.workers))
     else:
         if self.parallel:
             if self.stressors:
                 for stressor in self.stressors.split(' '):
                     cmdline += '--%s %s ' % (stressor, self.workers)
             if self.v_stressors:
                 for v_stressor in self.v_stressors.split(' '):
                     cmdline += '--%s %s ' % (v_stressor, self.workers)
             args.append(cmdline)
     if self.class_type in ['memory', 'vm', 'all']:
         args.append('--vm-bytes 80% ')
     if self.aggressive and self.maximize:
         args.append('--aggressive --maximize --oomable ')
     if self.exclude:
         args.append('--exclude %s ' % self.exclude)
     if self.verify:
         args.append('--verify ')
     if self.syslog:
         args.append('--syslog ')
     if self.metrics:
         args.append('--metrics ')
     if self.times:
         args.append('--times ')
     cmd = 'stress-ng %s' % " ".join(args)
     if self.parallel:
         if self.ttimeout:
             cmd += ' --timeout %s ' % self.ttimeout
         process.run(cmd, ignore_status=True, sudo=True)
     else:
         if self.ttimeout:
             timeout = ' --timeout %s ' % self.ttimeout
         if self.stressors:
             for stressor in self.stressors.split(' '):
                 stress_cmd = ' --%s %s %s' % (stressor, self.workers,
                                               timeout)
                 process.run("%s %s" % (cmd, stress_cmd),
                             ignore_status=True,
                             sudo=True)
         if self.ttimeout and self.v_stressors:
             timeout = ' --timeout %s ' % str(
                 int(self.ttimeout) + int(memory.memtotal() / 1024 / 1024))
         if self.v_stressors:
             for stressor in self.v_stressors.split(' '):
                 stress_cmd = ' --%s %s %s' % (stressor, self.workers,
                                               timeout)
                 process.run("%s %s" % (cmd, stress_cmd),
                             ignore_status=True,
                             sudo=True)
     collect_dmesg(self)
     ERROR = []
     pattern = [
         'WARNING: CPU:', 'Oops', 'Segfault', 'soft lockup',
         'Unable to handle', 'Hard LOCKUP'
     ]
     logs = process.system_output('dmesg').splitlines()
     for fail_pattern in pattern:
         for log in logs:
             if fail_pattern in log:
                 ERROR.append(log)
     if ERROR:
         self.fail("Test failed with following errors in dmesg :  %s " %
                   "\n".join(ERROR))
Example #41
0
    def test_create_one(self):
        """
        Test issuing a single  pool create commands at once.

        :avocado: tags=pool,poolcreate,multicreate
        """
        global basepath

        # Accumulate a list of pass/fail indicators representing
        # what is expected for each parameter then "and" them
        # to determine the expected result of the test
        expected_for_param = []

        modelist = self.params.get("mode", '/run/tests/modes/*')
        mode = modelist[0]
        expected_for_param.append(modelist[1])

        setidlist = self.params.get("setname", '/run/tests/setnames/*')
        setid = setidlist[0]
        expected_for_param.append(setidlist[1])

        uid = os.geteuid()
        gid = os.getegid()

        # if any parameter results in failure then the test should FAIL
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            daosctl = basepath + '/install/bin/daosctl'
            cmd = ('{0} create-pool -m {1} -u {2} -g {3} -s {4} -c 1'.format(
                daosctl, mode, uid, gid, setid))
            uuid_str = """{0}""".format(process.system_output(cmd))
            print("uuid is {0}\n".format(uuid_str))

            hostfile = basepath + self.params.get("hostfile",
                                                  '/run/files/local/')
            host = GetHostsFromFile.getHostsFromFile(hostfile)[0]
            exists = CheckForPool.checkForPool(host, uuid_str)
            if exists != 0:
                self.fail("Pool {0} not found on host {1}.\n".format(
                    uuid_str, host))

            delete_cmd = ('{0} destroy-pool '
                          '-i {1} -s {2} -f'.format(daosctl, uuid_str, setid))

            process.system(delete_cmd)

            exists = CheckForPool.checkForPool(host, uuid_str)
            if exists == 0:
                self.fail("Pool {0} found on host {1} after destroy.\n".format(
                    uuid_str, host))

            if expected_result == 'FAIL':
                self.fail("Expected to fail but passed.\n")

        except Exception as e:
            print(e)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Expecting to pass but test has failed.\n")
Example #42
0
    def setUp(self):
        """
        Set up.
        """
        self.policy = self.params.get('policy', default='service-time')
        self.policies = ["service-time", "round-robin", "queue-length"]
        # We will remove and add the policy back, so that this becomes
        # the last member of the list. This is done so that in the
        # policy change test later, this policy is set in the last
        # iteration.
        self.policies.remove(self.policy)
        self.policies.append(self.policy)
        self.op_shot_sleep_time = 60
        self.op_long_sleep_time = 180
        # Install needed packages
        dist = distro.detect()
        pkg_name = ""
        svc_name = ""
        if dist.name in ['Ubuntu', 'debian']:
            pkg_name += "multipath-tools"
            svc_name = "multipath-tools"
        elif dist.name == 'SuSE':
            pkg_name += "multipath-tools"
            svc_name = "multipathd"
        else:
            pkg_name += "device-mapper-multipath"
            svc_name = "multipathd"

        smm = SoftwareManager()
        if not smm.check_installed(pkg_name) and not smm.install(pkg_name):
            self.cancel("Can not install %s" % pkg_name)

        # Check if given multipath devices are present in system
        self.wwids = self.params.get('wwids', default='').split(' ')
        system_wwids = multipath.get_multipath_wwids()
        wwids_to_remove = []
        for wwid in self.wwids:
            if wwid not in system_wwids:
                self.log.info("%s not present in the system", wwid)
                wwids_to_remove.append(wwid)
        for wwid in wwids_to_remove:
            self.wwids.remove(wwid)
        if self.wwids == ['']:
            self.cancel("No Multipath Devices Given")

        # Create service object
        self.mpath_svc = service.SpecificServiceManager(svc_name)
        self.mpath_svc.restart()
        wait.wait_for(self.mpath_svc.status, timeout=10)

        # Take a backup of current config file
        self.mpath_file = "/etc/multipath.conf"
        if os.path.isfile(self.mpath_file):
            shutil.copyfile(self.mpath_file, "%s.bkp" % self.mpath_file)

        self.mpath_list = []

        # Find all details of multipath devices
        for wwid in self.wwids:
            if wwid not in process.system_output('multipath -ll',
                                                 ignore_status=True,
                                                 shell=True).decode('utf-8'):
                continue
            self.mpath_dic = {}
            self.mpath_dic["wwid"] = wwid
            self.mpath_dic["name"] = multipath.get_mpath_name(wwid)
            self.mpath_dic["paths"] = multipath.get_paths(wwid)
            self.mpath_dic["policy"] = multipath.get_policy(wwid)
            self.mpath_dic["size"] = multipath.get_size(wwid)
            self.mpath_list.append(self.mpath_dic)
        pprint(self.mpath_list)
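
The policy reordering in setUp above is a small trick worth isolating: the policy under test is moved to the end of the list so that a later policy-rotation test finishes on it. A tiny illustration:

policies = ["service-time", "round-robin", "queue-length"]
policy = "round-robin"  # the policy under test (illustrative value)

# Move the chosen policy to the end so it is applied in the last iteration.
policies.remove(policy)
policies.append(policy)
print(policies)  # ['service-time', 'queue-length', 'round-robin']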
Example #43
0
    def testbigping(self):
        '''
        check with different maximum transfer unit values
        '''
        cmd = "ip addr show  | grep %s | grep -oE '[^ ]+$'" % self.peer
        cmd = "ssh %s \"%s\"" % (self.peer, cmd)
        errors = []
        try:
            peer_interface = process.system_output(cmd, shell=True).strip()
        except process.CmdError:
            self.fail("failed to get info of peer interface")
        try:
            mtuval = process.system_output("ip link show %s" % self.iface,
                                           shell=True).split()[4]
        except process.CmdError:
            self.fail("failed to get mtu value of %s" % self.iface)
        for mtu in self.mtu_list:
            mtu_set = False
            self.log.info("trying with mtu %s", mtu)
            # Ping the peer machine with different maximum transfer unit
            # sizes, restoring the original MTU after each iteration.
            cmd = "ssh %s \"ip link set %s mtu %s\"" % (self.peer,
                                                        peer_interface, mtu)
            try:
                process.system(cmd, shell=True)
            except process.CmdError:
                self.log.debug("setting mtu value %s in peer failed", mtu)
            else:
                mtu_set = True
            con_cmd = "ip link set %s mtu %s" % (self.iface, mtu)
            try:
                process.system(con_cmd, shell=True)
            except process.CmdError:
                self.log.debug("setting mtu value %s in host failed", mtu)
            else:
                mtu_set = True
            time.sleep(10)
            if mtu_set:
                mtu = int(mtu) - 28
                cmd_ping = "ping -i 0.1 -c 30 -s %s %s" % (mtu, self.peer)
                ret = process.system(cmd_ping, shell=True, ignore_status=True)
                if ret != 0:
                    errors.append(str(int(mtu) + 28))
            else:
                errors.append(mtu)

            con_cmd = "ip link set %s mtu %s" % (self.iface, mtuval)
            try:
                process.system(con_cmd, shell=True)
            except process.CmdError:
                self.log.debug("setting original mtu value in host failed")
            cmd = "ssh %s \"ip link set %s mtu %s\"" % (self.peer,
                                                        peer_interface, mtuval)
            try:
                process.system(cmd, shell=True)
            except process.CmdError:
                self.log.debug("setting original mtu value in peer failed")
            time.sleep(10)

        if errors:
            self.fail("bigping test failed for %s" % " ".join(errors))
Example #44
0
def run_pict(binary, parameter_file, order):
    cmd = "%s %s /o:%s" % (binary, parameter_file, order)
    return process.system_output(cmd, shell=True)
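
Assuming a PICT binary and a parameter model file are available (both paths below are placeholders), the helper above would be used roughly like this; PICT prints tab-separated rows, and system_output returns bytes:

# Hypothetical usage of run_pict(); binary and model paths are placeholders.
output = run_pict("/usr/local/bin/pict", "nic_params.txt", 2)
for row in output.decode().splitlines():
    print(row.split("\t"))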
Example #45
0
def run(test, params, env):
    """
    Run Pktgen test between host/guest

    1) Boot the main vm, or just grab it if it's already booted.
    2) Configure pktgen on guest or host
    3) Run pktgen test, finish when timeout

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def _pin_vm_threads(node):
        """
        Pin guest vcpu and vhost threads to the cpus of a NUMA node, respectively.

        :param node: which numa node to pin
        """
        if node:
            if not isinstance(node, utils_misc.NumaNode):
                node = utils_misc.NumaNode(int(node))
            utils_test.qemu.pin_vm_threads(vm, node)

    timeout = float(params.get("pktgen_test_timeout", "240"))
    run_threads = params.get("pktgen_threads", 1)
    record_list = params.get("record_list")
    error_context.context("Init the VM, and try to login", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session_serial = vm.wait_for_serial_login(restart_network=True)
    session = vm.wait_for_login()

    # print numa information on host and pinning vhost and vcpus to cpus
    process.system_output("numactl --hardware")
    process.system_output("numactl --show")
    _pin_vm_threads(params.get("numa_node"))

    # get parameter from dictionary
    category = params.get("category")
    pkt_size = params.get("pkt_size")
    kvm_ver_chk_cmd = params.get("kvm_ver_chk_cmd")
    guest_ver_cmd = params["guest_ver_cmd"]

    # get qemu, guest kernel and kvm version info and write them into result
    result_path = utils_misc.get_path(test.resultsdir, "pktgen_perf.RHS")
    result_file = open(result_path, "w")
    kvm_ver = process.system_output(kvm_ver_chk_cmd, shell=True).decode()
    host_ver = os.uname()[2]
    guest_ver = session.cmd_output(guest_ver_cmd, timeout)
    result_file.write("### kvm-userspace-ver : %s\n" % kvm_ver)
    result_file.write("### kvm_version : %s\n" % host_ver)
    result_file.write("### guest-kernel-ver :%s\n" % guest_ver)

    # get record_list
    record_line = ""
    for record in record_list.split():
        record_line += "%s|" % format_result(record)

    def install_package(ver, session=None):
        """ check module pktgen, install kernel-modules-internal package """

        output_cmd = _system_output
        kernel_ver = "kernel-modules-internal-%s" % ver
        cmd_download = "cd /tmp && brew download-build %s --rpm" % kernel_ver
        cmd_install = "cd /tmp && rpm -ivh  %s.rpm --force --nodeps" % kernel_ver
        output_cmd(cmd_download).decode()
        cmd_clean = "rm -rf /tmp/%s.rpm" % kernel_ver
        if session:
            output_cmd = session.cmd_output
            local_path = "/tmp/%s.rpm" % kernel_ver
            remote_path = "/tmp/"
            vm.copy_files_to(local_path, remote_path)
        output_cmd(cmd_install)
        output_cmd(cmd_clean)

    check_cmd = "uname -r |grep el8"
    if process.run(check_cmd, shell=True, ignore_status=True).exit_status == 0:
        install_package(host_ver)
    if session.cmd_status(check_cmd) == 0:
        install_package(guest_ver.strip(), session=session)

    # get result tested by each scenario
    for pkt_cate in category.split():
        result_file.write("Category:%s\n" % pkt_cate)
        result_file.write("%s\n" % record_line.rstrip("|"))

        # copy pktgen_test script to test server
        local_path = os.path.join(data_dir.get_shared_dir(),
                                  "scripts/pktgen_perf.sh")
        remote_path = "/tmp/pktgen_perf.sh"
        if pkt_cate == "tx":
            vm.copy_files_to(local_path, remote_path)
        elif pkt_cate == "rx":
            process.run("cp %s %s" % (local_path, remote_path))

        for size in pkt_size.split():
            if pkt_cate == "tx":
                error_context.context("test guest tx pps performance",
                                      logging.info)
                guest_mac = vm.get_mac_address(0)
                pktgen_interface = utils_net.get_linux_ifname(
                    session, guest_mac)
                dsc_dev = utils_net.Interface(vm.get_ifname(0))
                dsc = dsc_dev.get_mac()
                runner = session.cmd
                pktgen_ip = vm.wait_for_get_address(0, timeout=5)
                pkt_cate_r = run_test(session_serial, runner, remote_path,
                                      pktgen_ip, dsc, pktgen_interface,
                                      run_threads, size, timeout)
            elif pkt_cate == "rx":
                error_context.context("test guest rx pps performance",
                                      logging.info)
                host_bridge = params.get("netdst", "switch")
                host_nic = utils_net.Interface(host_bridge)
                pktgen_ip = host_nic.get_ip()
                dsc = vm.wait_for_get_address(0, timeout=5)
                pktgen_interface = vm.get_ifname(0)
                runner = _system_output
                pkt_cate_r = run_test(session_serial, runner, remote_path,
                                      pktgen_ip, dsc, pktgen_interface,
                                      run_threads, size, timeout)
            line = "%s|" % format_result(size)
            line += "%s" % format_result(pkt_cate_r)
            result_file.write(("%s\n" % line))

    error_context.context("Verify host and guest kernel have no errors "
                          "or call traces", logging.info)
    vm.verify_kernel_crash()
    utils_misc.verify_dmesg()
    result_file.close()
    session_serial.close()
    session.close()
Example #46
0
 def test_networkconfig(self):
     '''
     check Network_configuration
     '''
     cmd = "ethtool -i %s | grep driver | awk '{print $2}'" % self.iface
     driver = process.system_output(cmd, shell=True).strip()
     self.log.info(driver)
     cmd = "ethtool -i %s | grep bus-info | awk '{print $2}'" % self.iface
     businfo = process.system_output(cmd, shell=True).strip()
     self.log.info(businfo)
     cmd = "lspci -v"
     bus_info = process.system_output(cmd, shell=True).strip()
     bus_info = bus_info.split('\n\n')
     self.log.info("Performing driver match check using lspci and ethtool")
     self.log.info("-----------------------------------------------------")
     for value in bus_info:
         if value.startswith(businfo):
             self.log.info("details are ---------> %s" % value)
             tmp = value.split('\n')
             for val in tmp:
                 if 'Kernel driver in use' in val:
                     cmd, driverinfo = val.split(': ')
                     self.log.info(driverinfo)
                     if driver != driverinfo:
                         self.fail("mismatch in driver information")
     cmd = r"cat /sys/module/%s/drivers/pci\:%s/%s/net/%s/mtu" %\
           (driver, driver, businfo, self.iface)
     mtu = process.system_output(cmd, shell=True).strip()
     self.log.info("mtu value is %s" % mtu)
     cmd = r"cat /sys/module/%s/drivers/pci\:%s/%s/net/%s/operstate" %\
           (driver, driver, businfo, self.iface)
     operstate = process.system_output(cmd, shell=True).strip()
     self.log.info("operstate is %s" % operstate)
     cmd = r"cat /sys/module/%s/drivers/pci\:%s/%s/net/%s/duplex" %\
           (driver, driver, businfo, self.iface)
     duplex = process.system_output(cmd, shell=True).strip()
     self.log.info("transmission mode is %s" % duplex)
     cmd = r"cat /sys/module/%s/drivers/pci\:%s/%s/net/%s/address" %\
           (driver, driver, businfo, self.iface)
     address = process.system_output(cmd, shell=True).strip()
     self.log.info("mac address is %s" % address)
     cmd = r"cat /sys/module/%s/drivers/pci\:%s/%s/net/%s/speed" %\
           (driver, driver, businfo, self.iface)
     speed = process.system_output(cmd, shell=True).strip()
     self.log.info("speed is %s" % speed)
     cmd = "ethtool %s | grep Speed | awk '{print $2}'" % self.iface
     eth_speed = process.system_output(cmd, shell=True).strip()
     eth_speed = eth_speed.strip('Mb/s')
     self.log.info("Performing Ethtool and interface checks for interface")
     self.log.info("-----------------------------------------------------")
     if speed != eth_speed:
         self.fail("mis match in speed")
     hw_addr = netifaces.ifaddresses(self.iface)[netifaces.AF_LINK]
     hw_addr = hw_addr[0]['addr']
     if hw_addr != address:
         self.fail("mismatch in hardware address")
     mtuval = process.system_output("ip link show %s" % self.iface,
                                    shell=True).split()[4]
     self.log.info("through ip link show, mtu value is %s" % mtuval)
     if mtu != mtuval:
         self.fail("mismatch in mtu")
     eth_state = process.system_output("ethtool %s | grep 'Link detected:'\
                                       " % self.iface,
                                       shell=True)
     if 'yes' in eth_state and operstate == 'down':
         self.fail("mis match in link state")
     if 'no' in eth_state and operstate == 'up':
         self.fail("mis match in link state")
Example #47
0
def collect_dmesg(object):
    object.whiteboard = process.system_output("dmesg")
Example #48
0
def collect_dmesg(object):
    return process.system_output("dmesg").decode()
Example #49
0
def run(test, params, env):
    """
    Test vm backingchain, blockcopy
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    status_error = 'yes' == params.get('status_error', 'no')
    error_msg = params.get('error_msg', '')
    case = params.get('case', '')
    blockcommand = params.get('blockcommand', '')
    blk_top = int(params.get('top', 0))
    blk_base = int(params.get('base', 0))
    opts = params.get('opts', '--verbose --wait')
    check_func = params.get('check_func', '')
    disk_type = params.get('disk_type', '')
    disk_src = params.get('disk_src', '')
    driver_type = params.get('driver_type', 'qcow2')
    vol_name = params.get('vol_name', 'vol_blockpull')
    pool_name = params.get('pool_name', '')
    brick_path = os.path.join(data_dir.get_tmp_dir(), pool_name)
    vg_name = params.get('vg_name', 'HostVG')
    vol_size = params.get('vol_size', '10M')

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    # List to collect paths to delete after test
    file_to_del = []
    virsh_dargs = {'debug': True, 'ignore_status': False}

    try:
        all_disks = vmxml.get_disk_source(vm_name)
        if not all_disks:
            test.error('Not found any disk file in vm.')
        image_file = all_disks[0].find('source').get('file')
        logging.debug('Image file of vm: %s', image_file)

        # Get all dev of virtio disks to calculate the dev of new disk
        all_vdisks = [disk for disk in all_disks if disk.find('target').get('dev').startswith('vd')]
        disk_dev = all_vdisks[-1].find('target').get('dev')
        new_dev = disk_dev[:-1] + chr(ord(disk_dev[-1]) + 1)

        # Setup iscsi target
        if disk_src == 'iscsi':
            disk_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True,
                image_size='1G')
            logging.debug('ISCSI target: %s', disk_target)

        # Setup lvm
        elif disk_src == 'lvm':
            # Stop multipathd to avoid vgcreate fail
            multipathd = service.Factory.create_service("multipathd")
            multipathd_status = multipathd.status()
            if multipathd_status:
                multipathd.stop()

            # Setup iscsi target
            device_name = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True,
                image_size='1G')
            logging.debug('ISCSI target for lvm: %s', device_name)

            # Create logical device
            logical_device = device_name
            lv_utils.vg_create(vg_name, logical_device)
            vg_created = True

            # Create logical volume as backing store
            vol_bk, vol_disk = 'vol1', 'vol2'
            lv_utils.lv_create(vg_name, vol_bk, vol_size)

            disk_target = '/dev/%s/%s' % (vg_name, vol_bk)
            src_vol = '/dev/%s/%s' % (vg_name, vol_disk)

        # Setup gluster
        elif disk_src == 'gluster':
            host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True, brick_path=brick_path, **params)
            logging.debug(host_ip)
            gluster_img = 'test.img'
            img_create_cmd = "qemu-img create -f raw /mnt/%s 10M" % gluster_img
            process.run("mount -t glusterfs %s:%s /mnt; %s; umount /mnt"
                        % (host_ip, vol_name, img_create_cmd), shell=True)
            disk_target = 'gluster://%s/%s/%s' % (host_ip, vol_name, gluster_img)

        else:
            test.error('Wrong disk source, unsupported by this test.')

        new_image = os.path.join(os.path.split(image_file)[0], 'test.img')
        params['snapshot_list'] = ['s%d' % i for i in range(1, 5)]

        if disk_src == 'lvm':
            new_image = src_vol
            if disk_type == 'block':
                new_image = disk_target
                for i in range(2, 6):
                    lv_utils.lv_create(vg_name, 'vol%s' % i, vol_size)
                snapshot_image_list = ['/dev/%s/vol%s' % (vg_name, i) for i in range(2, 6)]
        else:
            file_to_del.append(new_image)
            snapshot_image_list = [new_image.replace('img', i) for i in params['snapshot_list']]
        cmd_create_img = 'qemu-img create -f %s -b %s %s -F raw' % (driver_type, disk_target, new_image)
        if disk_type == 'block' and driver_type == 'raw':
            pass
        else:
            process.run(cmd_create_img, verbose=True, shell=True)
        info_new = utils_misc.get_image_info(new_image)
        logging.debug(info_new)

        # Create xml of new disk and add it to vmxml
        if disk_type:
            new_disk = Disk()
            new_disk.xml = libvirt.create_disk_xml({
                'type_name': disk_type,
                'driver_type': driver_type,
                'target_dev': new_dev,
                'source_file': new_image
            })

            logging.debug(new_disk.xml)

            vmxml.devices = vmxml.devices.append(new_disk)
            vmxml.xmltreefile.write()
            logging.debug(vmxml)
            vmxml.sync()

        vm.start()
        logging.debug(virsh.dumpxml(vm_name))

        # Create backing chain
        for i in range(len(params['snapshot_list'])):
            virsh.snapshot_create_as(
                vm_name,
                '%s --disk-only --diskspec %s,file=%s,stype=%s' %
                (params['snapshot_list'][i], new_dev, snapshot_image_list[i],
                 disk_type),
                **virsh_dargs
            )

            # Get path of each snapshot file
            snaps = virsh.domblklist(vm_name, debug=True).stdout.splitlines()
            for line in snaps:
                if line.lstrip().startswith(('hd', 'sd', 'vd')):
                    file_to_del.append(line.split()[-1])

        qemu_img_cmd = 'qemu-img info --backing-chain %s' % snapshot_image_list[-1]
        if libvirt_storage.check_qemu_image_lock_support():
            qemu_img_cmd += " -U"
        bc_info = process.run(qemu_img_cmd, verbose=True, shell=True).stdout_text

        if not disk_type == 'block':
            bc_chain = snapshot_image_list[::-1] + [new_image, disk_target]
        else:
            bc_chain = snapshot_image_list[::-1] + [new_image]
        bc_result = check_backingchain(bc_chain, bc_info)
        if not bc_result:
            test.fail('qemu-img info output of backing chain is not correct: %s'
                      % bc_info)

        # Generate blockpull/blockcommit options
        virsh_blk_cmd = getattr(virsh, blockcommand)
        if blockcommand == 'blockpull' and blk_base != 0:
            opts += ' --base {dev}[{}]'.format(blk_base, dev=new_dev)
        elif blockcommand == 'blockcommit':
            opt_top = ' --top {dev}[{}]'.format(blk_top, dev=new_dev) if blk_top != 0 else ''
            opt_base = ' --base {dev}[{}]'.format(blk_base, dev=new_dev) if blk_base != 0 else ''
            opts += opt_top + opt_base + (' --active' if blk_top == 0 else '')

        # Do blockpull/blockcommit
        virsh_blk_cmd(vm_name, new_dev, opts, **virsh_dargs)
        if blockcommand == 'blockcommit':
            virsh.blockjob(vm_name, new_dev, '--pivot', **virsh_dargs)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("XML after %s: %s" % (blockcommand, vmxml))

        # Check backing chain after blockpull/blockcommit
        check_bc_func_name = 'check_bc_%s' % check_func
        if check_bc_func_name in globals():
            check_bc = eval(check_bc_func_name)
            if not callable(check_bc):
                logging.warning('Function "%s" is not callable.', check_bc_func_name)
            elif not check_bc(blockcommand, vmxml, new_dev, bc_chain):
                test.fail('Backing chain check after %s failed' % blockcommand)
        else:
            logging.warning('Function "%s" is not implemented.', check_bc_func_name)

        virsh.dumpxml(vm_name, debug=True)

        # Check whether login is successful
        try:
            vm.wait_for_login().close()
        except Exception as e:
            test.fail('VM login failed: %s' % e)

    finally:
        logging.info('Start cleaning up.')
        for ss in params.get('snapshot_list', []):
            virsh.snapshot_delete(vm_name, '%s --metadata' % ss, debug=True)
        bkxml.sync()
        for path in file_to_del:
            logging.debug('Remove %s', path)
            if os.path.exists(path):
                os.remove(path)
        if disk_src == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src == 'lvm':
            process.run('rm -rf /dev/%s/%s' % (vg_name, vol_disk), ignore_status=True)
            if 'vol_bk' in locals():
                lv_utils.lv_remove(vg_name, vol_bk)
            if 'vg_created' in locals() and vg_created:
                lv_utils.vg_remove(vg_name)
                cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
                pv_name = process.system_output(cmd, shell=True, verbose=True).strip()
                if pv_name:
                    process.run("pvremove %s" % pv_name, verbose=True, ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src == 'gluster':
            gluster.setup_or_cleanup_gluster(
                is_setup=False, brick_path=brick_path, **params)
        if 'multipathd_status' in locals() and multipathd_status:
            multipathd.start()
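
check_backingchain is called above but not defined in this snippet. As a rough sketch only, a check that every expected image appears in order in the qemu-img info output could look like this (the real helper may be stricter):

def check_backingchain_sketch(expected_chain, qemu_img_info_output):
    """Return True if each expected image appears, in order, in the qemu-img output."""
    position = 0
    for image in expected_chain:
        position = qemu_img_info_output.find(image, position)
        if position == -1:
            return False
        position += len(image)
    return True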
Example #50
0
def run(test, params, env):
    """
    Test start domain with nwfilter rules.

    1) Prepare parameters.
    2) Prepare nwfilter rule and update domain interface to apply.
    3) Start domain and check rule.
    4) Clean env
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    exist_filter = params.get("exist_filter", "no-mac-spoofing")
    check_cmd = params.get("check_cmd")
    expect_match = params.get("expect_match")
    status_error = "yes" == params.get("status_error", "no")
    mount_noexec_tmp = "yes" == params.get("mount_noexec_tmp", "no")
    kill_libvirtd = "yes" == params.get("kill_libvirtd", "no")
    bug_url = params.get("bug_url", "")
    ipset_command = params.get("ipset_command")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    username = params.get("username")
    password = params.get("password")

    # Prepare vm filterref parameters dict list
    filter_param_list = []
    params_key = []
    for i in params.keys():
        if 'parameter_name_' in i:
            params_key.append(i)
    params_key.sort()
    for i in range(len(params_key)):
        params_dict = {}
        params_dict['name'] = params[params_key[i]]
        params_dict['value'] = params['parameter_value_%s' % i]
        filter_param_list.append(params_dict)
    filterref_dict = {}
    filterref_dict['name'] = filter_name
    filterref_dict['parameters'] = filter_param_list

    # backup vm xml
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    libvirtd = utils_libvirtd.Libvirtd()
    device_name = None
    try:
        rule = params.get("rule")
        if rule:
            # Create new filter xml
            filterxml = utlv.create_nwfilter_xml(params)
            # Define filter xml
            virsh.nwfilter_define(filterxml.xml, debug=True)

        # Update first vm interface with filter
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        vmxml.del_device(iface_xml)
        new_iface = interface.Interface('network')
        new_iface.xml = iface_xml.xml
        new_filterref = new_iface.new_filterref(**filterref_dict)
        new_iface.filterref = new_filterref
        logging.debug("new interface xml is: %s" % new_iface)
        vmxml.add_device(new_iface)
        vmxml.sync()

        if mount_noexec_tmp:
            device_name = utlv.setup_or_cleanup_iscsi(is_setup=True)
            utlv.mkfs(device_name, 'ext4')
            cmd = "mount %s /tmp -o noexec,nosuid" % device_name
            process.run(cmd, shell=True)

        if ipset_command:
            pkg = "ipset"
            if not utils_package.package_install(pkg):
                test.cancel("Can't install ipset on host")
            process.run(ipset_command, shell=True)

        # Run command
        try:
            vm.start()
            if not mount_noexec_tmp:
                vm.wait_for_serial_login(username=username, password=password)
            vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            iface_xml = vmxml.get_devices('interface')[0]
            iface_target = iface_xml.target['dev']
            logging.debug("iface target dev name is %s", iface_target)

            # Check iptables or ebtables on host
            if check_cmd:
                if "DEVNAME" in check_cmd:
                    check_cmd = check_cmd.replace("DEVNAME", iface_target)
                ret = utils_misc.wait_for(lambda: not process.system(
                    check_cmd, ignore_status=True, shell=True),
                                          timeout=30)
                if not ret:
                    test.fail("Rum command '%s' failed" % check_cmd)
                out = to_text(
                    process.system_output(check_cmd,
                                          ignore_status=False,
                                          shell=True))
                if expect_match and not re.search(expect_match, out):
                    test.fail("'%s' not found in output: %s" %
                              (expect_match, out))

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        if kill_libvirtd:
            cmd = "kill -s TERM `pidof libvirtd`"
            process.run(cmd, shell=True)
            ret = utils_misc.wait_for(lambda: not libvirtd.is_running(),
                                      timeout=30)
            if not ret:
                test.fail("Failed to kill libvirtd. %s" % bug_url)

    finally:
        if kill_libvirtd:
            libvirtd.restart()
        # Clean env
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
        # Undefine created filter
        if filter_name != exist_filter:
            virsh.nwfilter_undefine(filter_name, debug=True)
        if mount_noexec_tmp:
            if device_name:
                process.run("umount -l %s" % device_name,
                            ignore_status=True,
                            shell=True)
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        if ipset_command:
            process.run("ipset destroy blacklist", shell=True)
Example #51
0
 def get_iso_size(iso_file):
     """ Get the size of iso on host."""
     error_context.context("Get the iso size on host.", logging.info)
     return int(
         process.system_output('ls -l %s | awk \'{print $5}\'' % iso_file,
                               shell=True).decode())
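Parsing ls -l with awk works, but the same number is available without a shell round-trip. A sketch of the alternative (not taken from the original test):

import os


def get_iso_size(iso_file):
    """Get the size of the iso on host, in bytes."""
    return os.path.getsize(iso_file)

os.path.getsize() returns the st_size value that ls -l prints in its fifth column, so the two approaches agree for regular files.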
Example #52
0
 def setUp(self):
     '''
     To check and install dependencies for the test
     '''
     detected_distro = distro.detect()
     sm = SoftwareManager()
     depends = []
     if detected_distro.name == "Ubuntu":
         depends.append("openssh-client")
     if detected_distro.name in ["redhat", "fedora", "centos"]:
         depends.append("openssh-clients")
     if detected_distro.name == "SuSE":
         depends.append("openssh")
     for pkg in depends:
         if not sm.check_installed(pkg) and not sm.install(pkg):
             self.skip("%s package is need to test" % pkg)
     interfaces = netifaces.interfaces()
     self.user = self.params.get("user_name", default="root")
     self.host_interfaces = self.params.get("host_interfaces",
                                            default="").split(",")
     if not self.host_interfaces:
         self.skip("user should specify host interfaces")
     self.peer_interfaces = self.params.get("peer_interfaces",
                                            default="").split(",")
     for self.host_interface in self.host_interfaces:
         if self.host_interface not in interfaces:
             self.skip("interface is not available")
     self.peer_first_ipinterface = self.params.get("peer_ip", default="")
     if not self.peer_interfaces or self.peer_first_ipinterface == "":
         self.skip("peer machine should available")
     msg = "ip addr show  | grep %s | grep -oE '[^ ]+$'"\
           % self.peer_first_ipinterface
     cmd = "ssh %s@%s %s" % (self.user, self.peer_first_ipinterface, msg)
     self.peer_first_interface = process.system_output(cmd,
                                                       shell=True).strip()
     if self.peer_first_interface == "":
         self.fail("test failed because peer interface can not retrieved")
     self.bond_name = self.params.get("bond_name", default="tempbond")
     self.mode = self.params.get("bonding_mode", default="")
     if self.mode == "":
         self.skip("test skipped because mode not specified")
     self.host_ips = []
     self.peer_ips = [self.peer_first_ipinterface]
     for val in self.host_interfaces:
         cmd = "ip -f inet -o addr show %s | awk '{print $4}' | cut -d /\
               -f1" % val
         local_ip = process.system_output(cmd, shell=True).strip()
         if local_ip == "":
             self.fail("test failed because local ip can not retrieved")
         self.host_ips.append(local_ip)
     for val in self.peer_interfaces:
         msg = "ip -f inet -o addr show %s | awk '{print $4}' | cut -d /\
               -f1" % val
         cmd = "ssh %s@%s \"%s\""\
               % (self.user, self.peer_first_ipinterface, msg)
         peer_ip = process.system_output(cmd, shell=True).strip()
         cmd = 'echo %s | cut -d " " -f4' % peer_ip
         peer_ip = process.system_output(cmd, shell=True).strip()
         if peer_ip == "":
             self.fail("test failed because peer ip can not retrieved")
         self.peer_ips.append(peer_ip)
     self.peer_interfaces.insert(0, self.peer_first_interface)
     self.net_mask = []
     st = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
     for val in self.host_interfaces:
         mask = socket.inet_ntoa(
             fcntl.ioctl(st.fileno(), 0x891b,
                         struct.pack('256s', val))[20:24]).strip('\n')
         self.net_mask.append(mask)
     self.bonding_slave_file = "/sys/class/net/%s/bonding/slaves"\
                               % self.bond_name
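Several steps above shell out to the peer over ssh and capture the result with process.system_output. A minimal sketch of that remote-capture helper; user, host and command in the usage comment are placeholders, and password-less key-based ssh is assumed, as in the original setUp:

from avocado.utils import process


def remote_output(user, host, remote_cmd):
    """Run remote_cmd on host over ssh and return its stripped stdout."""
    cmd = 'ssh %s@%s "%s"' % (user, host, remote_cmd)
    return process.system_output(cmd, shell=True).decode().strip()

# e.g. remote_output("root", "10.0.0.2",
#                    "ip -f inet -o addr show eth1 | awk '{print $4}'")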
Example #53
0
    def setUp(self):
        """
        Build xfstest
        Source: git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git
        """
        self.use_dd = False
        root_fs = process.system_output(
            "df -T / | awk 'END {print $2}'", shell=True).decode("utf-8")
        if root_fs in ['ext3', 'ext4']:
            self.use_dd = True
        self.dev_type = self.params.get('type', default='loop')

        sm = SoftwareManager()

        self.detected_distro = distro.detect()

        packages = ['e2fsprogs', 'automake', 'gcc', 'quota', 'attr',
                    'make', 'xfsprogs', 'gawk']
        if self.detected_distro.name in ['Ubuntu', 'debian']:
            packages.extend(
                ['xfslibs-dev', 'uuid-dev', 'libuuid1',
                 'libattr1-dev', 'libacl1-dev', 'libgdbm-dev',
                 'uuid-runtime', 'libaio-dev', 'fio', 'dbench',
                 'gettext', 'libinih-dev', 'liburcu-dev', 'libblkid-dev',
                 'liblzo2-dev', 'zlib1g-dev', 'e2fslibs-dev', 'asciidoc',
                 'xmlto', 'libzstd-dev', 'libudev-dev'])
            if self.detected_distro.version in ['14']:
                packages.extend(['libtool'])
            elif self.detected_distro.version in ['18', '20']:
                packages.extend(['libtool-bin', 'libgdbm-compat-dev'])
            else:
                packages.extend(['libtool-bin'])

        elif self.detected_distro.name in ['centos', 'fedora', 'rhel', 'SuSE']:
            if self.dev_type == 'nvdimm':
                packages.extend(['ndctl', 'parted'])
                if self.detected_distro.name == 'rhel':
                    packages.extend(['daxctl'])
            packages.extend(['acl', 'bc', 'dump', 'indent', 'libtool', 'lvm2',
                             'xfsdump', 'psmisc', 'sed', 'libacl-devel',
                             'libattr-devel', 'libaio-devel', 'libuuid-devel',
                             'openssl-devel', 'xfsprogs-devel', 'gettext',
                             'libblkid-devel', 'lzo-devel', 'zlib-devel',
                             'e2fsprogs-devel', 'asciidoc', 'xmlto',
                             'libzstd-devel', 'systemd-devel', 'meson',
                             'gcc-c++'])

            if self.detected_distro.name == 'SuSE':
                packages.extend(['libbtrfs-devel', 'libcap-progs',
                                'liburcu-devel', 'libinih-devel'])
            else:
                packages.extend(['btrfs-progs-devel', 'userspace-rcu-devel'])

            packages_remove = ['indent', 'btrfs-progs-devel']
            if self.detected_distro.name == 'rhel' and\
                    self.detected_distro.version.startswith('8'):
                packages = list(set(packages)-set(packages_remove))
            elif self.detected_distro.name == 'rhel' and\
                    self.detected_distro.version.startswith('9'):
                packages_remove.extend(['dump'])
                packages = list(set(packages)-set(packages_remove))

            if self.detected_distro.name in ['centos', 'fedora']:
                packages.extend(['fio', 'dbench'])
        else:
            self.cancel("test not supported in %s" % self.detected_distro.name)

        for package in packages:
            if not sm.check_installed(package) and not sm.install(package):
                self.cancel("Fail to install %s required for this test." %
                            package)
        self.skip_dangerous = self.params.get('skip_dangerous', default=True)
        self.test_range = self.params.get('test_range', default=None)
        self.scratch_mnt = self.params.get(
            'scratch_mnt', default='/mnt/scratch')
        self.test_mnt = self.params.get('test_mnt', default='/mnt/test')
        self.disk_mnt = self.params.get('disk_mnt', default='/mnt/loop_device')
        self.fs_to_test = self.params.get('fs', default='ext4')
        self.run_type = self.params.get('run_type', default='distro')

        if self.run_type == 'upstream':
            prefix = "/usr/local"
            bin_prefix = "/usr/local/bin"

            if self.detected_distro.name == 'SuSE':
                # SuSE has /sbin at a higher priority than /usr/local/bin
                # in $PATH, so install all the binaries in /sbin to make
                # sure they are picked up correctly by xfstests.
                #
                # We still install in /usr/local but binaries are kept in
                # /sbin
                bin_prefix = "/sbin"

            if self.fs_to_test == "ext4":
                # Build e2fs progs
                e2fsprogs_dir = os.path.join(self.teststmpdir, 'e2fsprogs')
                if not os.path.exists(e2fsprogs_dir):
                    os.makedirs(e2fsprogs_dir)
                e2fsprogs_url = self.params.get('e2fsprogs_url')
                git.get_repo(e2fsprogs_url, destination_dir=e2fsprogs_dir)
                e2fsprogs_build_dir = os.path.join(e2fsprogs_dir, 'build')
                if not os.path.exists(e2fsprogs_build_dir):
                    os.makedirs(e2fsprogs_build_dir)
                os.chdir(e2fsprogs_build_dir)
                process.run("../configure --prefix=%s --bindir=%s --sbindir=%s"
                            % (prefix, bin_prefix, bin_prefix), verbose=True)
                build.make(e2fsprogs_build_dir)
                build.make(e2fsprogs_build_dir, extra_args='install')

            if self.fs_to_test == "xfs":
                if self.detected_distro.name in ['centos', 'fedora', 'rhel']:
                    libini_path = process.run("ldconfig -p | grep libini",
                                              verbose=True, shell=True,
                                              ignore_status=True)
                    if libini_path.exit_status != 0:
                        # Build libini.h as it is needed for xfsprogs
                        libini_dir = os.path.join(self.teststmpdir, 'libini')
                        if not os.path.exists(libini_dir):
                            os.makedirs(libini_dir)
                        git.get_repo('https://github.com/benhoyt/inih',
                                     destination_dir=libini_dir)
                        os.chdir(libini_dir)
                        process.run("meson build", verbose=True)
                        libini_build_dir = os.path.join(libini_dir, 'build')
                        if os.path.exists(libini_build_dir):
                            os.chdir(libini_build_dir)
                            process.run("meson install", verbose=True)
                        else:
                            self.fail('Something went wrong while building '
                                      'libini. Please check the logs.')
                # Build xfs progs
                xfsprogs_dir = os.path.join(self.teststmpdir, 'xfsprogs')
                if not os.path.exists(xfsprogs_dir):
                    os.makedirs(xfsprogs_dir)
                xfsprogs_url = self.params.get('xfsprogs_url')
                git.get_repo(xfsprogs_url, destination_dir=xfsprogs_dir)
                os.chdir(xfsprogs_dir)
                process.run("./configure --prefix=%s --bindir=%s --sbindir=%s"
                            % (prefix, bin_prefix, bin_prefix), verbose=True)
                build.make(xfsprogs_dir)
                build.make(xfsprogs_dir, extra_args='install')

            if self.fs_to_test == "btrfs":
                # Build btrfs progs
                btrfsprogs_dir = os.path.join(self.teststmpdir, 'btrfsprogs')
                if not os.path.exists(btrfsprogs_dir):
                    os.makedirs(btrfsprogs_dir)
                btrfsprogs_url = self.params.get('btrfsprogs_url')
                git.get_repo(btrfsprogs_url, destination_dir=btrfsprogs_dir)
                os.chdir(btrfsprogs_dir)
                process.run("./autogen.sh", verbose=True)
                process.run("./configure --prefix=%s --bindir=%s --sbindir=%s --disable-documentation"
                            % (prefix, bin_prefix, bin_prefix), verbose=True)
                build.make(btrfsprogs_dir)
                build.make(btrfsprogs_dir, extra_args='install')

        # Check versions of fsprogs
        fsprogs_ver = process.system_output("mkfs.%s -V" % self.fs_to_test,
                                            ignore_status=True,
                                            shell=True).decode("utf-8")
        self.log.info(fsprogs_ver)

        if process.system('which mkfs.%s' % self.fs_to_test,
                          ignore_status=True):
            self.cancel('Unknown filesystem %s' % self.fs_to_test)
        mount = True
        self.devices = []
        self.log_devices = []
        shutil.copyfile(self.get_data('local.config'),
                        os.path.join(self.teststmpdir, 'local.config'))
        shutil.copyfile(self.get_data('group'),
                        os.path.join(self.teststmpdir, 'group'))

        self.log_test = self.params.get('log_test', default='')
        self.log_scratch = self.params.get('log_scratch', default='')

        if self.dev_type == 'loop':
            base_disk = self.params.get('disk', default=None)
            loop_size = self.params.get('loop_size', default='7GiB')
            if not base_disk:
                # Using root for file creation by default
                check = (int(loop_size.split('GiB')[0]) * 2) + 1
                if disk.freespace('/') / 1073741824 > check:
                    self.disk_mnt = ''
                    mount = False
                else:
                    self.cancel('Need %s GB to create loop devices' % check)
            self._create_loop_device(base_disk, loop_size, mount)
        elif self.dev_type == 'nvdimm':
            self.setup_nvdimm()
        else:
            self.test_dev = self.params.get('disk_test', default=None)
            self.scratch_dev = self.params.get('disk_scratch', default=None)
            self.devices.extend([self.test_dev, self.scratch_dev])
        # mkfs for devices
        if self.devices:
            cfg_file = os.path.join(self.teststmpdir, 'local.config')
            self.mkfs_opt = self.params.get('mkfs_opt', default='')
            self.mount_opt = self.params.get('mount_opt', default='')
            with open(cfg_file, "r") as sources:
                lines = sources.readlines()
            with open(cfg_file, "w") as sources:
                for line in lines:
                    if line.startswith('export TEST_DEV'):
                        sources.write(
                            re.sub(r'export TEST_DEV=.*', 'export TEST_DEV=%s'
                                   % self.devices[0], line))
                    elif line.startswith('export TEST_DIR'):
                        sources.write(
                            re.sub(r'export TEST_DIR=.*', 'export TEST_DIR=%s'
                                   % self.test_mnt, line))
                    elif line.startswith('export SCRATCH_DEV'):
                        sources.write(re.sub(
                            r'export SCRATCH_DEV=.*', 'export SCRATCH_DEV=%s'
                                                      % self.devices[1], line))
                    elif line.startswith('export SCRATCH_MNT'):
                        sources.write(
                            re.sub(
                                r'export SCRATCH_MNT=.*',
                                'export SCRATCH_MNT=%s' %
                                self.scratch_mnt,
                                line))
                        break
            with open(cfg_file, "a") as sources:
                if self.log_test:
                    sources.write('export USE_EXTERNAL=yes\n')
                    sources.write('export TEST_LOGDEV="%s"\n' % self.log_test)
                    self.log_devices.append(self.log_test)
                if self.log_scratch:
                    sources.write('export SCRATCH_LOGDEV="%s"\n' %
                                  self.log_scratch)
                    self.log_devices.append(self.log_scratch)
                if self.mkfs_opt:
                    sources.write('MKFS_OPTIONS="%s"\n' % self.mkfs_opt)
                if self.mount_opt:
                    sources.write('MOUNT_OPTIONS="%s"\n' % self.mount_opt)
            self.logdev_opt = self.params.get('logdev_opt', default='')
            for dev in self.log_devices:
                dev_obj = partition.Partition(dev)
                dev_obj.mkfs(fstype=self.fs_to_test, args=self.mkfs_opt)
            for ite, dev in enumerate(self.devices):
                dev_obj = partition.Partition(dev)
                if self.logdev_opt:
                    dev_obj.mkfs(fstype=self.fs_to_test, args='%s %s=%s' % (
                        self.mkfs_opt, self.logdev_opt, self.log_devices[ite]))
                else:
                    dev_obj.mkfs(fstype=self.fs_to_test, args=self.mkfs_opt)

        git.get_repo('git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git',
                     destination_dir=self.teststmpdir)

        build.make(self.teststmpdir)
        self.available_tests = self._get_available_tests()

        self.test_list = self._create_test_list(self.test_range)
        self.log.info("Tests available in srcdir: %s",
                      ", ".join(self.available_tests))
        if not self.test_range:
            self.exclude = self.params.get('exclude', default=None)
            self.gen_exclude = self.params.get('gen_exclude', default=None)
            self.share_exclude = self.params.get('share_exclude', default=None)
            if self.exclude or self.gen_exclude or self.share_exclude:
                self.exclude_file = os.path.join(self.teststmpdir, 'exclude')
                if self.exclude:
                    self._create_test_list(self.exclude, self.fs_to_test,
                                           dangerous=False)
                if self.gen_exclude:
                    self._create_test_list(self.gen_exclude, "generic",
                                           dangerous=False)
                if self.share_exclude:
                    self._create_test_list(self.share_exclude, "shared",
                                           dangerous=False)
        if self.detected_distro.name != 'SuSE':
            if process.system('useradd 123456-fsgqa', sudo=True, ignore_status=True):
                self.log.warn('useradd 123456-fsgqa failed')
            if process.system('useradd fsgqa', sudo=True, ignore_status=True):
                self.log.warn('useradd fsgqa failed')
        else:
            if process.system('useradd -m -U fsgqa', sudo=True, ignore_status=True):
                self.log.warn('useradd fsgqa failed')
            if process.system('groupadd sys', sudo=True, ignore_status=True):
                self.log.warn('groupadd sys failed')
        if not os.path.exists(self.scratch_mnt):
            os.makedirs(self.scratch_mnt)
        if not os.path.exists(self.test_mnt):
            os.makedirs(self.test_mnt)
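The availability probe for libini above hinges on the exit status of an "ldconfig -p | grep" pipeline. A condensed sketch of checking a CmdResult that way; the library name is kept from the example, the fallback action is illustrative only:

from avocado.utils import process

result = process.run("ldconfig -p | grep libini", shell=True,
                     verbose=True, ignore_status=True)
if result.exit_status != 0:
    # grep found nothing: the library is not registered with the dynamic
    # linker and would have to be built from source.
    print("libini not found")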
Example #54
0
def run(test, params, env):
    """
    Qemu host nic bonding test:
    1) Load bonding module with mode 802.3ad
    2) Bring up bond interface
    3) Add nics to bond interface
    4) Add a new bridge and add bond interface to it
    5) Get ip address for bridge
    6) Boot up guest with the bridge
    7) Check guest network via ping
    8) Start file transfer between guest and host
    9) Disable and enable physical interfaces during file transfer

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    bond_iface = params.get("bond_iface", "bond0")
    bond_br_name = params.get("bond_br_name", "br_bond0")
    timeout = int(params.get("login_timeout", 240))
    remote_host = params.get("dsthost")
    ping_timeout = int(params.get("ping_timeout", 240))
    bonding_timeout = int(params.get("bonding_timeout", 1))
    bonding_mode = params.get("bonding_mode", "1")
    bonding_miimon = params.get("bonding_miimon", "100")
    bonding_max_bonds = params.get("bonding_max_bonds", "1")
    params['netdst'] = bond_br_name
    host_bridges = utils_net.Bridge()

    error_context.context("Load bonding module with mode 802.3ad",
                          logging.info)
    if not process.system("lsmod|grep bonding", ignore_status=True,
                          shell=True):
        process.system("modprobe -r bonding")

    process.system("modprobe bonding mode=%s miimon=%s max_bonds=%s" %
                   (bonding_mode, bonding_miimon, bonding_max_bonds))

    error_context.context("Bring up %s" % bond_iface, logging.info)
    host_ifaces = utils_net.get_host_iface()

    if bond_iface not in host_ifaces:
        test.error("Can not find %s in host" % bond_iface)

    bond_iface = utils_net.Interface(bond_iface)
    bond_iface.up()
    bond_mac = bond_iface.get_mac()

    host_ph_iface_pre = params.get("host_ph_iface_prefix", "en")
    host_iface_bonding = int(params.get("host_iface_bonding", 2))

    ph_ifaces = [_ for _ in host_ifaces if re.match(host_ph_iface_pre, _)]
    host_ph_ifaces = [_ for _ in ph_ifaces if utils_net.Interface(_).is_up()]

    ifaces_in_use = host_bridges.list_iface()
    host_ph_ifaces_un = list(set(host_ph_ifaces) - set(ifaces_in_use))

    if (len(host_ph_ifaces_un) < 2
            or len(host_ph_ifaces_un) < host_iface_bonding):
        test.cancel("Host need %s nics at least." % host_iface_bonding)

    error_context.context("Add nics to %s" % bond_iface.name, logging.info)
    host_ifaces_bonding = host_ph_ifaces_un[:host_iface_bonding]
    ifenslave_cmd = "ifenslave %s" % bond_iface.name
    op_ifaces = []
    for host_iface_bonding in host_ifaces_bonding:
        op_ifaces.append(utils_net.Interface(host_iface_bonding))
        ifenslave_cmd += " %s" % host_iface_bonding
    process.system(ifenslave_cmd)

    error_context.context(
        "Add a new bridge and add %s to it." % bond_iface.name, logging.info)
    if bond_br_name not in host_bridges.list_br():
        host_bridges.add_bridge(bond_br_name)
    host_bridges.add_port(bond_br_name, bond_iface.name)

    error_context.context("Get ip address for bridge", logging.info)
    process.system("dhclient -r; dhclient %s" % bond_br_name, shell=True)

    error_context.context("Boot up guest with bridge %s" % bond_br_name,
                          logging.info)
    params["start_vm"] = "yes"
    vm_name = params.get("main_vm")
    env_process.preprocess_vm(test, params, env, vm_name)
    vm = env.get_vm(vm_name)
    session = vm.wait_for_login(timeout=timeout)

    error_context.context("Checking guest netowrk via ping.", logging.info)
    ping_cmd = params.get("ping_cmd")
    ping_cmd = re.sub("REMOTE_HOST", remote_host, ping_cmd)
    session.cmd(ping_cmd, timeout=ping_timeout)

    error_context.context("Start file transfer", logging.info)
    f_transfer = utils_misc.InterruptedThread(
        utils_test.run_virt_sub_test,
        args=(
            test,
            params,
            env,
        ),
        kwargs={"sub_type": "file_transfer"})
    f_transfer.start()
    utils_misc.wait_for(
        lambda: process.system_output("pidof scp", ignore_status=True), 30)

    error_context.context(
        "Disable and enable physical "
        "interfaces in %s" % bond_br_name, logging.info)
    while True:
        for op_iface in op_ifaces:
            logging.debug("Turn down %s", op_iface.name)
            op_iface.down()
            time.sleep(bonding_timeout)
            logging.debug("Bring up %s", op_iface.name)
            op_iface.up()
            time.sleep(bonding_timeout)
        if not f_transfer.is_alive():
            break
    f_transfer.join()
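The transfer above is considered started once pidof scp returns something. A standalone sketch of that polling idiom without the virt-test helpers; process name and timeout are illustrative:

import time

from avocado.utils import process


def wait_for_process(name, timeout=30, step=1):
    """Poll pidof until the named process shows up or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if process.system_output("pidof %s" % name, ignore_status=True).strip():
            return True
        time.sleep(step)
    return False

# e.g. wait_for_process("scp")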
Example #55
0
 def __isSMT():
     if 'is not SMT capable' in process.system_output("ppc64_cpu --smt"):
         return False
     return True
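On Python 3, process.system_output() returns bytes, so an equivalent sketch of the SMT probe would decode before the substring test; ppc64_cpu is assumed to be installed, as the helper above already requires:

from avocado.utils import process


def is_smt():
    out = process.system_output("ppc64_cpu --smt", ignore_status=True).decode()
    return 'is not SMT capable' not in out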
Example #56
0
    def test(self):

        out = process.system_output(
            '%s ' % os.path.join(self.workdir, 'tests/validate'))
        if 'fail' in out:
            self.fail("test failed:check manually")
Example #57
0
 def bond_setup(self, arg1, arg2):
     '''
     bond setup
     '''
     if arg1 == "local":
         self.log.info("Configuring Bonding on Local machine")
         self.log.info("--------------------------------------")
         for ifs in self.host_interfaces:
             cmd = "ip addr flush dev %s" % ifs
             process.system(cmd, shell=True, ignore_status=True)
         for ifs in self.host_interfaces:
             cmd = "ifconfig %s down" % ifs
             process.system(cmd, shell=True, ignore_status=True)
         cmd = "modprobe bonding"
         process.system(cmd, shell=True, ignore_status=True)
         cmd = "echo +%s > /sys/class/net/bonding_masters" % self.bond_name
         process.system(cmd, shell=True, ignore_status=True)
         cmd = "echo %s > /sys/class/net/%s/bonding/mode"\
               % (arg2, self.bond_name)
         process.system(cmd, shell=True, ignore_status=True)
         cmd = "echo 100 > /sys/class/net/%s/bonding/miimon"\
               % self.bond_name
         process.system(cmd, shell=True, ignore_status=True)
         for val in self.host_interfaces:
             cmd = "echo '+%s' > %s"\
                   % (val, self.bonding_slave_file)
             if process.system(cmd, shell=True, ignore_status=True) != 0:
                 self.fail("Mode %s FAIL while bonding setup" % arg2)
             time.sleep(2)
         cmd = "cat /proc/net/bonding/%s | grep 'Bonding Mode' |\
               cut -d ':' -f 2" % self.bond_name
         bond_name_val = process.system_output(cmd, shell=True).strip('\n')
         self.log.info("Trying bond mode %s [ %s ]" % (arg2, bond_name_val))
         for ifs in self.host_interfaces:
             cmd = "ifconfig %s up" % ifs
             if process.system(cmd, shell=True, ignore_status=True) != 0:
                 self.fail("unable to interface up")
         cmd = "ifconfig %s %s netmask %s up"\
               % (self.bond_name, self.host_ips[0], self.net_mask[0])
         if process.system(cmd, shell=True, ignore_status=True) != 0:
             self.fail("bond setup command failed in local machine")
         time.sleep(5)
     else:
         self.log.info("Configuring Bonding on Peer machine")
         self.log.info("------------------------------------------")
         cmd = ''
         for val in self.peer_interfaces:
             cmd += 'ip addr flush dev %s;' % val
         for val in self.peer_interfaces:
             cmd += 'ifconfig %s down;' % val
         cmd += 'modprobe bonding;'
         cmd += 'echo +%s > /sys/class/net/bonding_masters;'\
                % self.bond_name
         cmd += 'echo 0 > /sys/class/net/%s/bonding/mode;'\
                % self.bond_name
         cmd += 'echo 100 > /sys/class/net/%s/bonding/miimon;'\
                % self.bond_name
         for val in self.peer_interfaces:
             cmd += 'echo "+%s" > %s;' % (val, self.bonding_slave_file)
         for val in self.peer_interfaces:
             cmd += 'ifconfig %s up;' % val
         cmd += 'ifconfig %s %s netmask %s up;sleep 5;'\
                % (self.bond_name,
                   self.peer_first_ipinterface, self.net_mask[0])
         peer_cmd = "ssh %s@%s \"%s\""\
                    % (self.user, self.peer_first_ipinterface, cmd)
         if process.system(peer_cmd, shell=True, ignore_status=True) != 0:
             self.fail("bond setup command failed in peer machine")
Example #58
0
def run(test, params, env):
    """
    Cpu utilization test with hv flags.

    1)Start a Windows guest vm.
    2)Pin the vm to certain numa node, to keep accuracy.
    3)Stop several Windows services & background processes on guest
      to lower the cpu usage to a minimum.
    4)Reboot vm to apply changes, then wait for several minutes to make
      sure the cpu has settled down.
    5)Start both checking the guest&host's cpu usage, monitoring the value.
    6)Compare the average utilization value to standard values.

    :param test: the test object
    :param params: the params of the test
    :param env: the testing environment object
    """
    vm = env.get_vm(params["main_vm"])

    # pin guest vcpus/memory/vhost threads to last numa node of host
    _pin_vm_threads(vm, params.get_numeric("numa_node", -1))

    vm.verify_alive()

    timeout = params.get_numeric("login_timeout", 240)
    host_check_times = params.get_numeric("host_check_times", 900)
    host_check_interval = params.get_numeric("host_check_interval", 2)
    guest_check_timeout = host_check_times * host_check_interval
    thread_cpu_level = params.get_numeric("thread_cpu_level", 5)
    set_owner_cmd = params.get("set_owner_cmd")
    set_full_control_cmd = params.get("set_full_control_cmd")
    session = vm.wait_for_serial_login(timeout=timeout)
    do_migration = params.get("do_migration", "no") == "yes"

    service_names = params.get("serives_to_stop").split()

    # check and stop services
    for service in service_names:
        _stop_service(test, params, session, service)

    # stop windows defender
    if set_owner_cmd and set_full_control_cmd:
        set_owner_cmd = utils_misc.set_winutils_letter(session, set_owner_cmd)
        set_full_control_cmd = utils_misc.set_winutils_letter(
            session, set_full_control_cmd)
        session.cmd(set_owner_cmd)
        session.cmd(set_full_control_cmd)
    session.cmd(params["reg_cmd"])

    session = vm.reboot(session, timeout=timeout, serial=True)

    if do_migration:
        vm.migrate(env=env)
        session = vm.wait_for_serial_login(timeout=timeout)

    # wait for the guest to chill
    time.sleep(1800)

    # start background checking guest cpu usage
    thread = threading.Thread(target=_check_cpu_thread_func,
                              args=(session, guest_check_timeout))
    thread.start()
    time.sleep(60)

    # start checking host cpu usage
    pid = vm.get_pid()
    process.system(params["host_check_cmd"] % pid, shell=True)
    thread.join(guest_check_timeout + 360)

    vcpu_thread_pattern = params.get("vcpu_thread_pattern",
                                     r'thread_id.?[:|=]\s*(\d+)')
    vcpu_ids = vm.get_vcpu_pids(vcpu_thread_pattern)
    for thread_id in vcpu_ids:
        # output result
        host_cpu_usage = process.system_output(params["thread_process_cmd"] %
                                               thread_id,
                                               shell=True)
        host_cpu_usage = float(host_cpu_usage.decode())
        if host_cpu_usage > thread_cpu_level:
            test.fail("The cpu usage of thread %s is %s"
                      " > %s" % (thread_id, host_cpu_usage, thread_cpu_level))
Example #59
0
    def test(self):
        """
        Test disk devices with different operations of creating filesystem and
        mount it on a directory and verify it with certain parameters name,
        size, UUID and IO sizes etc
        """
        msg = []

        # get by-id name
        cmd = "ls -l /dev/disk/by-id | grep -i %s" % self.disk_abs
        self.disk_byid = os.path.basename(
            process.system_output(cmd, ignore_status=True, shell=True,
                                  sudo=True).decode("utf-8").split("->")[1])
        self.log.info("byid name: %s", self.disk_byid)
        if process.system("ls /dev/disk/by-id -l| grep -i %s" % self.disk_byid,
                          ignore_status=True,
                          shell=True,
                          sudo=True) != 0:
            msg.append("Given disk %s is not in /dev/disk/by-id" %
                       self.disk_abs)
        for disk_node in self.disk_nodes:
            if process.system(
                    "ls /dev/disk/by-path -l| grep -i %s" % disk_node,
                    ignore_status=True,
                    shell=True,
                    sudo=True) != 0:
                msg.append("Given disk %s is not in /dev/disk/by-path" %
                           disk_node)

        # Verify disk listed in all tools
        if self.mpath:
            cmd_list = ["fdisk -l ", "lsblk "]
        else:
            cmd_list = ["fdisk -l ", "parted -l", "lsblk ", "lshw -c disk "]
        if self.distro == 'Ubuntu':
            cmd_list.append("hwinfo --short --block")
        for cmd in cmd_list:
            cmd = cmd + " | grep -i %s" % self.disk_base
            if process.system(cmd, ignore_status=True, shell=True,
                              sudo=True) != 0:
                msg.append("Given disk %s is not present in %s" %
                           (self.disk_base, cmd))
        if self.mpath:
            for disk_node in self.disk_nodes:
                if process.system("lshw -c disk | grep -i %s" % disk_node,
                                  ignore_status=True,
                                  shell=True,
                                  sudo=True) != 0:
                    msg.append("Given disk %s is not in lshw -c disk" %
                               disk_node)

        # Get the size and UUID of the disk
        cmd = "lsblk -l %s --output SIZE -b |sed -n 2p" % self.disk
        output = process.system_output(cmd,
                                       ignore_status=True,
                                       shell=True,
                                       sudo=True).decode("utf-8")
        if not output:
            self.cancel("No information available in lsblk")
        self.size_b = output.split()[0]
        self.log.info("Disk: %s Size: %s", self.disk, self.size_b)

        # Get the physical/logical and minimal/optimal sector sizes
        pbs_sysfs = "/sys/block/%s/queue/physical_block_size" % self.disk_byid
        pbs = genio.read_file(pbs_sysfs).rstrip("\n")
        lbs_sysfs = "/sys/block/%s/queue/logical_block_size" % self.disk_byid
        lbs = genio.read_file(lbs_sysfs).rstrip("\n")
        mis_sysfs = "/sys/block/%s/queue/minimum_io_size" % self.disk_byid
        mis = genio.read_file(mis_sysfs).rstrip("\n")
        ois_sysfs = "/sys/block/%s/queue/optimal_io_size" % self.disk_byid
        ois = genio.read_file(ois_sysfs).rstrip("\n")
        self.log.info("pbs: %s, lbs: %s, mis: %s, ois: %s", pbs, lbs, mis, ois)

        # Verify sector sizes
        sector_string = "Sector size (logical/physical): %s " \
                        "bytes / %s bytes" % (lbs, pbs)
        output = process.system_output("fdisk -l %s" % self.disk,
                                       ignore_status=True,
                                       shell=True,
                                       sudo=True).decode("utf-8")
        if sector_string not in output:
            msg.append("Mismatch in sector sizes of lbs,pbs in "
                       "fdisk o/p w.r.t sysfs paths")
        io_size_string = "I/O size (minimum/optimal): %s " \
                         "bytes / %s bytes" % (mis, mis)
        if io_size_string not in output:
            msg.append("Mismatch in IO sizes of mis and ois"
                       " in fdisk o/p w.r.t sysfs paths")

        # Verify disk size in other tools
        cmd = "fdisk -l %s | grep -i %s" % (self.disk, self.disk)
        if self.size_b not in process.system_output(cmd,
                                                    ignore_status=True,
                                                    shell=True,
                                                    sudo=True).decode("utf-8"):
            msg.append("Size of disk %s mismatch in fdisk o/p" % self.disk)
        cmd = "sfdisk -l %s | grep -i %s" % (self.disk, self.disk)
        if self.size_b not in process.system_output(cmd,
                                                    ignore_status=True,
                                                    shell=True,
                                                    sudo=True).decode("utf-8"):
            msg.append("Size of disk %s mismatch in sfdisk o/p" % self.disk)
Example #60
0
 def setUp(self):
     """
     Verifies if we have list of packages installed on OS
     and also skips the test if user gives the current OS boot disk as
     disk input it may erase the data
     :param disk: test disk where the disk operations can be done
     :param fs: type of filesystem to create
     :param dir: path of the directory to mount the disk device
     """
     smm = SoftwareManager()
     pkg = ""
     if 'ppc' not in platform.processor():
         self.cancel("Processor is not ppc64")
     self.disk = self.params.get('disk', default=None)
     self.dirs = self.params.get('dir', default=self.workdir)
     self.fstype = self.params.get('fs', default='ext4')
     self.log.info("disk: %s, dir: %s, fstype: %s", self.disk, self.dirs,
                   self.fstype)
     if not self.disk:
         self.cancel("No disk input, please update yaml and re-run")
     cmd = "df --output=source"
     if self.disk in process.system_output(cmd, ignore_status=True) \
             .decode("utf-8"):
         self.cancel("Given disk is os boot disk,"
                     "it will be harmful to run this test")
     pkg_list = ["lshw"]
     self.distro = distro.detect().name
     if self.distro == 'Ubuntu':
         pkg_list.append("hwinfo")
     if self.fstype == 'ext4':
         pkg_list.append('e2fsprogs')
     if self.fstype == 'xfs':
         pkg_list.append('xfsprogs')
     if self.fstype == 'btrfs':
         ver = int(distro.detect().version)
         rel = int(distro.detect().release)
         if distro.detect().name == 'rhel':
             if (ver == 7 and rel >= 4) or ver > 7:
                 self.cancel("btrfs is not supported with \
                             RHEL 7.4 onwards")
         if self.distro == 'Ubuntu':
             pkg_list.append("btrfs-tools")
     for pkg in pkg_list:
         if pkg and not smm.check_installed(pkg) and not smm.install(pkg):
             self.cancel(
                 "Package %s is missing and could not be installed" % pkg)
     self.disk_nodes = []
     self.disk_base = os.path.basename(self.disk)
     if multipath.is_path_a_multipath(self.disk_base):
         self.mpath = True
         self.disk_abs = self.disk_base
         all_wwids = multipath.get_multipath_wwids()
         for wwid in all_wwids:
             paths = multipath.get_paths(wwid)
             for path in paths:
                 if path == self.disk_abs:
                      # wwid of the mpath our disk is on
                     mwwid = wwid
         self.disk_nodes = multipath.get_paths(mwwid)
     else:
         self.mpath = False
         self.disk_abs = self.disk_base
         self.disk_nodes.append(self.disk_base)
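The guard near the top of this setUp keeps the test away from the OS boot disk. The same check, condensed into a reusable sketch that mirrors the original's substring match against df output; the disk path in the usage comment is a placeholder:

from avocado.utils import process


def is_os_disk(disk):
    """True if the given disk backs any mounted filesystem on the host."""
    mounted = process.system_output("df --output=source",
                                    ignore_status=True).decode("utf-8")
    return disk in mounted

# e.g. skip destructive steps when is_os_disk("/dev/sda") returns True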