Example #1
    def setUp(self):
        '''
        Sets all the required parameters and also
        mounts the tmpfs to be used in the test.
        '''

        # Set params as per available memory in system
        self.mem_path = self.params.get("t_dir",
                                        default=os.path.join(
                                            data_dir.get_tmp_dir(),
                                            'thp_space'))
        free_mem = self.params.get("mem_size",
                                   default=memory.meminfo.MemFree.m)
        self.dd_timeout = self.params.get("dd_timeout", default=900)
        self.thp_split = None
        try:
            memory.read_from_vmstat("thp_split_page")
            self.thp_split = "thp_split_page"
        except IndexError:
            self.thp_split = "thp_split"

        # Set block size as hugepage size * 2
        self.block_size = memory.meminfo.Hugepagesize.m * 2
        self.count = free_mem // self.block_size

        # Mount device as per free memory size
        if not os.path.exists(self.mem_path):
            os.makedirs(self.mem_path)
        self.device = Partition(device="none", mountpoint=self.mem_path)
        self.device.mount(mountpoint=self.mem_path,
                          fstype="tmpfs",
                          args='-o size=%dM' % free_mem)
Example #2
    def setUp(self):
        '''
        Sets the required params for dd and mounts the tmpfs dir
        '''

        self.swap_free = []
        mem_free = memory.meminfo.MemFree.m
        mem = memory.meminfo.MemTotal.m
        swap = memory.meminfo.SwapTotal.m
        self.hugepage_size = memory.meminfo.Hugepagesize.m
        self.swap_free.append(memory.meminfo.SwapFree.m)
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        self.dd_timeout = 900

        # If swap is enough fill all memory with dd
        if self.swap_free[0] > (mem - mem_free):
            self.count = (mem // self.hugepage_size) // 2
            tmpfs_size = mem
        else:
            self.count = (mem_free // self.hugepage_size) // 2
            tmpfs_size = mem_free

        if swap <= 0:
            self.cancel("Swap is not enabled in the system")

        if not os.path.ismount(self.mem_path):
            if not os.path.isdir(self.mem_path):
                os.makedirs(self.mem_path)
            self.device = Partition(device="none", mountpoint=self.mem_path)
            self.device.mount(mountpoint=self.mem_path,
                              fstype="tmpfs",
                              args="-o size=%sM" % tmpfs_size,
                              mnt_check=False)
Example #3
 def setUp(self):
     sm = SoftwareManager()
     deps = ['gcc', 'make', 'gawk']
     self.build_dir = self.params.get('build_dir',
                                      default=data_dir.get_tmp_dir())
     for package in deps:
         if not sm.check_installed(package) and not sm.install(package):
             self.cancel('%s is needed for the test to be run' % package)
     run_type = self.params.get('type', default='upstream')
     if run_type == "upstream":
         url = 'https://github.com/bminor/glibc/archive/master.zip'
         tarball = self.fetch_asset("glibc.zip",
                                    locations=[url],
                                    expire='7d')
         archive.extract(tarball, self.workdir)
         glibc_dir = os.path.join(self.workdir, "glibc-master")
     elif run_type == "distro":
         glibc_dir = os.path.join(self.workdir, "glibc-distro")
         if not os.path.exists(glibc_dir):
             os.makedirs(glibc_dir)
         glibc_dir = sm.get_source("glibc", glibc_dir)
     os.chdir(self.build_dir)
     process.run('%s/configure --prefix=%s' %
                 (glibc_dir, self.params.get("prefix", default="/usr")),
                 ignore_status=True,
                 sudo=True)
     build.make(self.build_dir)
Example #4
 def test(self):
     self.tmpdir = data_dir.get_tmp_dir()
     # Read USAGE in Unixbench directory in src to give the args
     args = self.params.get('args', default='-v -c 1')
     process.system(' ./Run ' + args, shell=True, sudo=True)
     report_path = os.path.join(self.logdir, 'stdout')
     self.report_data = open(report_path).readlines()
Example #5
 def setUp(self):
     sm = SoftwareManager()
     detected_distro = distro.detect()
     self.tmpdir = data_dir.get_tmp_dir()
     # Check for basic utilities
     for package in ['gcc', 'make', 'gfortran']:
         if detected_distro.name == "SuSE" and package == "gfortran":
             package = 'gcc-fortran'
         if detected_distro.name == "redhat" and package == "gfortran":
             package = 'gcc-gfortran'
         if not sm.check_installed(package) and not sm.install(package):
             self.error(package + ' is needed for the test to be run')
     atlas_url = 'https://sourceforge.net/projects/'\
                 'math-atlas/files/Stable/3.10.3/atlas3.10.3.tar.bz2'
     lapack_url = 'http://www.netlib.org/lapack/lapack-3.6.1.tgz'
     atlas_url = self.params.get('atlas_url', default=atlas_url)
     lapack_url = self.params.get('lapack_url', default=lapack_url)
     atlas_tarball = self.fetch_asset(atlas_url, expire='7d')
     archive.extract(atlas_tarball, self.srcdir)
     self.atlas_dir = os.path.join(self.srcdir, 'ATLAS')
     self.atlas_build_dir = os.path.join(self.atlas_dir, 'atlas_build_dir')
     os.makedirs(self.atlas_build_dir)
     lapack_tarball = self.fetch_asset(lapack_url, expire='7d')
     os.chdir(self.atlas_build_dir)
     config_args = '--shared -b 64 '\
                   '--with-netlib-lapack-tarfile=%s' % lapack_tarball
     config_args = self.params.get('config_args', default=config_args)
     process.system('../configure %s' % config_args)
     # Tune and compile library
     build.make(self.atlas_build_dir)
Example #6
    def setUp(self):
        '''
        Sets all the required parameters and also
        mounts the tmpfs to be used in the test.
        '''

        # Set params as per available memory in system
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        free_mem = int(memory.freememtotal() / 1024)
        self.dd_timeout = 900
        self.thp_split = None
        try:
            memory.read_from_vmstat("thp_split_page")
            self.thp_split = "thp_split_page"
        except IndexError:
            self.thp_split = "thp_split"

        # Set block size as hugepage size * 2
        self.block_size = (memory.get_huge_page_size() / 1024) * 2
        self.count = free_mem / self.block_size

        # Mount device as per free memory size
        if not os.path.exists(self.mem_path):
            os.makedirs(self.mem_path)
        self.device = Partition(device="none", mountpoint=self.mem_path)
        self.device.mount(mountpoint=self.mem_path, fstype="tmpfs",
                          args='-o size=%dM' % free_mem)
Example #7
 def setUp(self):
     sm = SoftwareManager()
     detected_distro = distro.detect()
     self.tmpdir = data_dir.get_tmp_dir()
     # Check for basic utilities
     for package in ['gcc', 'make', 'gfortran']:
         if detected_distro.name == "SuSE" and package == "gfortran":
             package = 'gcc-fortran'
         if detected_distro.name == "redhat" and package == "gfortran":
             package = 'gcc-gfortran'
         if not sm.check_installed(package) and not sm.install(package):
             self.error(package + ' is needed for the test to be run')
     atlas_url = 'https://sourceforge.net/projects/'\
                 'math-atlas/files/Stable/3.10.3/atlas3.10.3.tar.bz2'
     lapack_url = 'http://www.netlib.org/lapack/lapack-3.6.1.tgz'
     atlas_url = self.params.get('atlas_url', default=atlas_url)
     lapack_url = self.params.get('lapack_url', default=lapack_url)
     atlas_tarball = self.fetch_asset(atlas_url, expire='7d')
     archive.extract(atlas_tarball, self.srcdir)
     self.atlas_dir = os.path.join(self.srcdir, 'ATLAS')
     self.atlas_build_dir = os.path.join(self.atlas_dir, 'atlas_build_dir')
     os.makedirs(self.atlas_build_dir)
     lapack_tarball = self.fetch_asset(lapack_url, expire='7d')
     os.chdir(self.atlas_build_dir)
     config_args = '--shared -b 64 '\
                   '--with-netlib-lapack-tarfile=%s' % lapack_tarball
     config_args = self.params.get('config_args', default=config_args)
     process.system('../configure %s' % config_args)
     # Tune and compile library
     build.make(self.atlas_build_dir)
Example #8
    def setUp(self):
        '''
        Sets the required params for dd and mounts the tmpfs dir
        '''

        self.swap_free = []
        mem_free = memory.meminfo.MemFree.m
        mem = memory.meminfo.MemTotal.m
        swap = memory.meminfo.SwapTotal.m
        self.hugepage_size = memory.meminfo.Hugepagesize.m
        self.swap_free.append(memory.meminfo.SwapFree.m)
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        self.dd_timeout = 900

        # If swap is enough fill all memory with dd
        if self.swap_free[0] > (mem - mem_free):
            self.count = (mem / self.hugepage_size) / 2
            tmpfs_size = mem
        else:
            self.count = (mem_free / self.hugepage_size) / 2
            tmpfs_size = mem_free

        if swap <= 0:
            self.cancel("Swap is not enabled in the system")

        if not os.path.ismount(self.mem_path):
            if not os.path.isdir(self.mem_path):
                os.makedirs(self.mem_path)
            self.device = Partition(device="none", mountpoint=self.mem_path)
            self.device.mount(mountpoint=self.mem_path, fstype="tmpfs",
                              args="-o size=%sM" % tmpfs_size)
Example #9
 def setUp(self):
     sm = SoftwareManager()
     detected_distro = distro.detect()
     self.tmpdir = data_dir.get_tmp_dir()
     # Check for basic utilities
     for package in ['gcc', 'make', 'gfortran']:
         if detected_distro.name == "SuSE" and package == "gfortran":
             package = 'gcc-fortran'
         # FIXME: "redhat" as the distro name for RHEL is deprecated
         # on Avocado versions >= 50.0.  This is a temporary compatibility
         # enabler for older runners, but should be removed soon
         if detected_distro.name in ["rhel", "redhat"] and package == "gfortran":
             package = 'gcc-gfortran'
         if not sm.check_installed(package) and not sm.install(package):
             self.error(package + ' is needed for the test to be run')
     atlas_url = 'https://sourceforge.net/projects/'\
                 'math-atlas/files/Stable/3.10.3/atlas3.10.3.tar.bz2'
     lapack_url = 'http://www.netlib.org/lapack/lapack-3.6.1.tgz'
     atlas_url = self.params.get('atlas_url', default=atlas_url)
     lapack_url = self.params.get('lapack_url', default=lapack_url)
     atlas_tarball = self.fetch_asset(atlas_url, expire='7d')
     archive.extract(atlas_tarball, self.workdir)
     self.atlas_dir = os.path.join(self.workdir, 'ATLAS')
     self.atlas_build_dir = os.path.join(self.atlas_dir, 'atlas_build_dir')
     os.makedirs(self.atlas_build_dir)
     lapack_tarball = self.fetch_asset(lapack_url, expire='7d')
     os.chdir(self.atlas_build_dir)
     config_args = '--shared -b 64 '\
                   '--with-netlib-lapack-tarfile=%s '\
                   '--cripple-atlas-performance' % lapack_tarball
     config_args = self.params.get('config_args', default=config_args)
     process.system('../configure %s' % config_args)
     # Tune and compile library
     build.make(self.atlas_build_dir)
Example #10
def get_tmp_dir(public=True):
    """
    Get the most appropriate tmp dir location.

    :param public: If public for all users' access
    """
    persistent_dir = get_settings_value('vt.common', 'tmp_dir', default="")
    if persistent_dir != "":
        return persistent_dir
    tmp_dir = None
    # apparmor denies /tmp/* and /var/tmp/*, which causes failures across tests;
    # it is better to handle that here
    if distro.detect().name == 'Ubuntu':
        tmp_dir = "/var/lib/libvirt/images"
        if not utils_path.usable_rw_dir(tmp_dir):
            logging.warning("Unable to write in '/var/lib/libvirt/images' "
                            "on Ubuntu, apparmor might complain...")
            tmp_dir = None
    tmp_dir = data_dir.get_tmp_dir(basedir=tmp_dir)
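    # Make the tmp dir traversable by all users and readable by group/other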
    if public:
        tmp_dir_st = os.stat(tmp_dir)
        os.chmod(
            tmp_dir, tmp_dir_st.st_mode | stat.S_IXUSR | stat.S_IXGRP
            | stat.S_IXOTH | stat.S_IRGRP | stat.S_IROTH)
    return tmp_dir
Example #11
 def setUp(self):
     sm = SoftwareManager()
     detected_distro = distro.detect()
     self.tmpdir = data_dir.get_tmp_dir()
     # Check for basic utilities
     for package in ['gcc', 'make', 'gfortran']:
         if detected_distro.name == "SuSE" and package == "gfortran":
             package = 'gcc-fortran'
         # FIXME: "redhat" as the distro name for RHEL is deprecated
         # on Avocado versions >= 50.0.  This is a temporary compatibility
         # enabler for older runners, but should be removed soon
         if detected_distro.name in ["rhel", "redhat"
                                     ] and package == "gfortran":
             package = 'gcc-gfortran'
         if not sm.check_installed(package) and not sm.install(package):
             self.cancel('%s is needed for the test to be run' % package)
     atlas_url = 'https://sourceforge.net/projects/'\
                 'math-atlas/files/Stable/3.10.3/atlas3.10.3.tar.bz2'
     lapack_url = 'http://www.netlib.org/lapack/lapack-3.6.1.tgz'
     atlas_url = self.params.get('atlas_url', default=atlas_url)
     lapack_url = self.params.get('lapack_url', default=lapack_url)
     atlas_tarball = self.fetch_asset(atlas_url, expire='7d')
     archive.extract(atlas_tarball, self.workdir)
     self.atlas_dir = os.path.join(self.workdir, 'ATLAS')
     self.atlas_build_dir = os.path.join(self.atlas_dir, 'atlas_build_dir')
     os.makedirs(self.atlas_build_dir)
     lapack_tarball = self.fetch_asset(lapack_url, expire='7d')
     os.chdir(self.atlas_build_dir)
     config_args = '--shared -b 64 '\
                   '--with-netlib-lapack-tarfile=%s '\
                   '--cripple-atlas-performance' % lapack_tarball
     config_args = self.params.get('config_args', default=config_args)
     process.system('../configure %s' % config_args)
     # Tune and compile library
     build.make(self.atlas_build_dir)
Example #12
File: datadir.py  Project: FengYang/avocado
 def run(self, args):
     view = output.View()
     view.notify(event="message", msg='Avocado Data Directories:')
     view.notify(event="message", msg='    base dir        ' + data_dir.get_base_dir())
     view.notify(event="message", msg='    tests dir       ' + data_dir.get_test_dir())
     view.notify(event="message", msg='    data dir        ' + data_dir.get_data_dir())
     view.notify(event="message", msg='    logs dir        ' + data_dir.get_logs_dir())
     view.notify(event="message", msg='    tmp dir         ' + data_dir.get_tmp_dir())
Example #13
 def test(self):
     self.tmpdir = data_dir.get_tmp_dir()
     # Read USAGE in Unixbench directory in src to give the args
     args = self.params.get('args', default='-v -c 1')
     os.chdir(self.srcdir)
     process.system(' ./Run ' + args, shell=True, sudo=True)
     report_path = os.path.join(self.logdir, 'stdout')
     self.report_data = open(report_path).readlines()
Example #14
File: datadir.py  Project: eduardok/avocado
 def list_data_dirs(self, args):
     bcolors = output.colors
     pipe = output.get_paginator()
     pipe.write(bcolors.header_str('Avocado Data Directories:'))
     pipe.write('\n    base dir:        ' + data_dir.get_base_dir())
     pipe.write('\n    tests dir:       ' + data_dir.get_test_dir())
     pipe.write('\n    data dir:        ' + data_dir.get_data_dir())
     pipe.write('\n    logs dir:        ' + data_dir.get_logs_dir())
     pipe.write('\n    tmp dir:         ' + data_dir.get_tmp_dir())
Example #15
def get_tmp_dir(public=True):
    """
    Get the most appropriate tmp dir location.

    :param public: If public for all users' access
    """
    tmp_dir = data_dir.get_tmp_dir()
    if public:
        tmp_dir_st = os.stat(tmp_dir)
        os.chmod(tmp_dir, tmp_dir_st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | stat.S_IRGRP | stat.S_IROTH)
    return tmp_dir
Example #16
def get_tmp_dir(public=True):
    """
    Get the most appropriate tmp dir location.

    :param public: If public for all users' access
    """
    tmp_dir = data_dir.get_tmp_dir()
    if public:
        tmp_dir_st = os.stat(tmp_dir)
        os.chmod(tmp_dir, tmp_dir_st.st_mode | stat.S_IXUSR |
                 stat.S_IXGRP | stat.S_IXOTH | stat.S_IRGRP | stat.S_IROTH)
    return tmp_dir
Example #17
 def test(self):
     parallel_procs = []
     self.tmpdir = data_dir.get_tmp_dir()
     os.chdir(self.tmpdir)
     # This is the reference copy of the linux tarball
     # that will be used for subsequent comparisons
     self.log.info('Unpacking base copy')
     self.base_dir = os.path.join(self.tmpdir, 'linux.orig')
     archive.extract(self.tarball, self.base_dir)
     self.log.info('Unpacking test copies')
     for j in range(self.sim_cps):
         tmp_dir = 'linux.%s' % j
         if self.parallel:
             os.mkdir(tmp_dir)
             # Start parallel process
             tar_cmd = 'tar jxf ' + self.tarball + ' -C ' + tmp_dir
             self.log.info("Unpacking tarball to %s", tmp_dir)
             obj = process.SubProcess(cmd=tar_cmd,
                                      verbose=False,
                                      shell=True)
             obj.start()
             parallel_procs.append(obj)
         else:
             self.log.info("Unpacking tarball to %s", tmp_dir)
             archive.extract(self.tarball, tmp_dir)
     # Wait for the subprocess before comparison
     if self.parallel:
         self.log.info("Wait background processes before proceed")
         for proc in parallel_procs:
             proc.wait()
     parallel_procs = []
     self.log.info('Comparing test copies with base copy')
     for j in range(self.sim_cps):
         kernel_ver = os.path.basename(self.tarball).strip('.tar.bz2')
         tmp_dir = 'linux.%s/%s' % (j, kernel_ver)
         if self.parallel:
             diff_cmd = 'diff -U3 -rN linux.orig/' + kernel_ver
             diff_cmd += ' ' + tmp_dir
             self.log.info("Comparing linux.orig with %s", tmp_dir)
             obj = process.SubProcess(cmd=diff_cmd,
                                      verbose=False,
                                      shell=True)
             obj.start()
             parallel_procs.append(obj)
         else:
             try:
                 self.log.info('Comparing linux.orig with %s', tmp_dir)
                 process.system('diff -U3 -rN linux.orig linux.%s' % j)
              except process.CmdError as error:
                 self.nfail += 1
                 self.log.info('Error comparing trees: %s', error)
Example #18
 def test(self):
     parallel_procs = []
     self.tmpdir = data_dir.get_tmp_dir()
     os.chdir(self.tmpdir)
     # This is the reference copy of the linux tarball
     # that will be used for subsequent comparisons
     self.log.info('Unpacking base copy')
     self.base_dir = os.path.join(self.tmpdir, 'linux.orig')
     archive.extract(self.tarball, self.base_dir)
     self.log.info('Unpacking test copies')
     for j in range(self.sim_cps):
         tmp_dir = 'linux.%s' % j
         if self.parallel:
             os.mkdir(tmp_dir)
             # Start parallel process
             tar_cmd = 'tar jxf ' + self.tarball + ' -C ' + tmp_dir
             self.log.info("Unpacking tarball to %s", tmp_dir)
             obj = process.SubProcess(cmd=tar_cmd, verbose=False,
                                      shell=True)
             obj.start()
             parallel_procs.append(obj)
         else:
             self.log.info("Unpacking tarball to %s", tmp_dir)
             archive.extract(self.tarball, tmp_dir)
     # Wait for the subprocess before comparison
     if self.parallel:
         self.log.info("Wait background processes before proceed")
         for proc in parallel_procs:
             proc.wait()
     parallel_procs = []
     self.log.info('Comparing test copies with base copy')
     for j in range(self.sim_cps):
         kernel_ver = os.path.basename(self.tarball).strip('.tar.bz2')
         tmp_dir = 'linux.%s/%s' % (j, kernel_ver)
         if self.parallel:
             diff_cmd = 'diff -U3 -rN linux.orig/' + kernel_ver
             diff_cmd += ' ' + tmp_dir
             self.log.info("Comparing linux.orig with %s", tmp_dir)
             obj = process.SubProcess(cmd=diff_cmd, verbose=False,
                                      shell=True)
             obj.start()
             parallel_procs.append(obj)
         else:
             try:
                 self.log.info('Comparing linux.orig with %s', tmp_dir)
                 process.system('diff -U3 -rN linux.orig linux.%s' % j)
              except process.CmdError as error:
                 self.nfail += 1
                 self.log.info('Error comparing trees: %s', error)
Example #19
    def power_on(self):
        assert not self.is_on()

        self.monitor_socket = tempfile.mktemp(dir=data_dir.get_tmp_dir())
        self.devices.add_qmp_monitor(self.monitor_socket)
        self._qmp = monitor.QEMUMonitorProtocol(self.monitor_socket,
                                                server=True)
        self.serial_socket = tempfile.mktemp(dir=data_dir.get_tmp_dir())
        self.devices.add_serial(self.serial_socket)
        if self.params.get('kvm', '/plugins/virt/qemu/*') != "off" and \
                os.access('/dev/kvm', os.W_OK):
            self.log('Using KVM')
            self.devices.add_cmdline("-enable-kvm")
        else:
            self.log('/dev/kvm not accessible, not using KVM')

        tmpl = self.params.get('contents', '/plugins/virt/qemu/template/*')

        if tmpl is None:
            cmdline = self.devices.get_cmdline()
        else:
            tags = self._template_build_tags()
            cmdline = self._template_apply(tmpl, tags)

        self._popen = process.SubProcess(cmd=cmdline)
        self.pid = self._popen.start()
        self._qmp.accept()
        self.serial_console = aexpect.ShellSession(
            "nc -U %s" % self.serial_socket,
            auto_close=False,
            output_func=genio.log_line,
            output_params=("serial-console-%s.log" % self.short_id, ),
            prompt=self.params.get("shell_prompt",
                                   "/plugins/virt/guest/*",
                                   default="[\#\$]"))
        self._screendump_thread_start()
Example #20
    def setUp(self):
        '''
        Sets required params for dd workload and mounts the tmpfs
        '''

        # Get required mem info
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        self.block_size = int(mmap.PAGESIZE) / 1024
        # add mount point
        os.mkdir(self.mem_path)
        self.device = Partition(device="none", mountpoint=self.mem_path)
        self.device.mount(mountpoint=self.mem_path, fstype="tmpfs")
        free_space = (disk.freespace(self.mem_path)) / 1024
        # Leaving out some free space in tmpfs
        self.count = (free_space / self.block_size) - 3
Example #21
 def setUp(self):
     sm = SoftwareManager()
     deps = ['gcc', 'make', 'gawk']
     self.build_dir = self.params.get('build_dir',
                                      default=data_dir.get_tmp_dir())
     for package in deps:
         if not sm.check_installed(package) and not sm.install(package):
             self.error(package + ' is needed for the test to be run')
     url = 'https://github.com/bminor/glibc/archive/master.zip'
     tarball = self.fetch_asset("glibc.zip", locations=[url], expire='7d')
     archive.extract(tarball, self.srcdir)
     glibc_dir = os.path.join(self.srcdir, "glibc-master")
     os.chdir(self.build_dir)
     process.run(glibc_dir + '/configure --prefix=%s' % self.build_dir,
                 ignore_status=True, sudo=True)
     build.make(self.build_dir)
Example #22
    def setUp(self):
        '''
        Sets required params for dd workload and mounts the tmpfs
        '''

        # Get required mem info
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        self.block_size = int(mmap.PAGESIZE) / 1024
        # add mount point
        if not os.path.exists(self.mem_path):
            os.makedirs(self.mem_path)
        self.device = Partition(device="none", mountpoint=self.mem_path)
        self.device.mount(mountpoint=self.mem_path, fstype="tmpfs")
        free_space = (disk.freespace(self.mem_path)) / 1024
        # Leaving out some free space in tmpfs
        self.count = (free_space / self.block_size) - 3
Example #23
def get_tmp_dir(public=True):
    """
    Get the most appropriate tmp dir location.

    :param public: If public for all users' access
    """
    tmp_dir = None
    # apparmor denies /tmp/* and /var/tmp/*, which causes failures across tests;
    # it is better to handle that here
    if distro.detect().name == 'Ubuntu':
        tmp_dir = "/var/lib/libvirt/images"
    tmp_dir = data_dir.get_tmp_dir(basedir=tmp_dir)
    if public:
        tmp_dir_st = os.stat(tmp_dir)
        os.chmod(tmp_dir, tmp_dir_st.st_mode | stat.S_IXUSR |
                 stat.S_IXGRP | stat.S_IXOTH | stat.S_IRGRP | stat.S_IROTH)
    return tmp_dir
Example #24
 def setUp(self):
     smm = SoftwareManager()
     # Check for basic utilities
     self.tmpdir = data_dir.get_tmp_dir()
     self.report_data = self.err = None
     self.build_dir = self.params.get('build_dir', default=self.tmpdir)
     for package in ['gcc', 'make', 'patch']:
         if not smm.check_installed(package) and not smm.install(package):
             self.cancel('%s is needed for the test to be run' % package)
     url = 'https://github.com/kdlucas/byte-unixbench/archive/master.zip'
     tarball = self.fetch_asset("byte-unixbench.zip", locations=[url],
                                expire='7d')
     archive.extract(tarball, self.workdir)
     self.sourcedir = os.path.join(self.workdir,
                                   "byte-unixbench-master/UnixBench")
     os.chdir(self.sourcedir)
     build.make(self.sourcedir)
Example #25
def get_tmp_dir(public=True):
    """
    Get the most appropriate tmp dir location.

    :param public: If public for all users' access
    """
    tmp_dir = None
    # apparmor denies /tmp/* and /var/tmp/*, which causes failures across tests;
    # it is better to handle that here
    if distro.detect().name == 'Ubuntu':
        tmp_dir = "/var/lib/libvirt/images"
    tmp_dir = data_dir.get_tmp_dir(basedir=tmp_dir)
    if public:
        tmp_dir_st = os.stat(tmp_dir)
        os.chmod(tmp_dir, tmp_dir_st.st_mode | stat.S_IXUSR |
                 stat.S_IXGRP | stat.S_IXOTH | stat.S_IRGRP | stat.S_IROTH)
    return tmp_dir
Example #26
 def setUp(self):
     smm = SoftwareManager()
     # Check for basic utilities
     self.tmpdir = data_dir.get_tmp_dir()
     self.report_data = self.err = None
     self.build_dir = self.params.get('build_dir', default=self.tmpdir)
     for package in ['gcc', 'make', 'patch']:
         if not smm.check_installed(package) and not smm.install(package):
             self.cancel('%s is needed for the test to be run' % package)
     url = 'https://github.com/kdlucas/byte-unixbench/archive/master.zip'
     tarball = self.fetch_asset("byte-unixbench.zip",
                                locations=[url],
                                expire='7d')
     archive.extract(tarball, self.srcdir)
     self.sourcedir = os.path.join(self.srcdir,
                                   "byte-unixbench-master/UnixBench")
     os.chdir(self.sourcedir)
     build.make(self.sourcedir)
Example #27
    def setUp(self):
        """
        Downloads a copy of the linux kernel, calculates an estimated size of
        the uncompressed tarball, and uses this value to calculate the number of
        copies of the linux kernel that will be uncompressed.
        """
        self.nfail = 0
        self.tmpdir = data_dir.get_tmp_dir()
        self.tmpdir = self.params.get('dir_to_extract', default=self.tmpdir)
        self.base_dir = os.path.join(self.tmpdir, 'linux.orig')
        tarball_base = self.params.get('tarball_base',
                                       default='linux-2.6.18.tar.bz2')
        kernel_repo = self.params.get('kernel_repo',
                                      default='http://www.kernel.org/pub/'
                                      'linux/kernel/v2.6')
        tarball_url = os.path.join(kernel_repo, tarball_base)
        tarball_md5 = self.params.get('tarball_md5',
                                      default='296a6d150d260144639c3664d127d1'
                                      '74')
        parallel = self.params.get('parallel', default=True)
        self.parallel = parallel
        self.log.info('Downloading linux kernel tarball')
        self.tarball = self.fetch_asset(tarball_url,
                                        asset_hash=tarball_md5,
                                        algorithm='md5')
        size_tarball = os.path.getsize(self.tarball) // 1024 // 1024

        # Estimation of the tarball size after uncompression
        compress_ratio = 5
        est_size = size_tarball * compress_ratio
        self.sim_cps = self.get_sim_cps(est_size)
        self.log.info('Source file: %s', tarball_base)
        self.log.info('Megabytes per copy: %s', size_tarball)
        self.log.info('Compress ratio: %s', compress_ratio)
        self.log.info('Estimated size after uncompression: %s', est_size)
        self.log.info('Number of copies: %s', self.sim_cps)
        self.log.info('Parallel: %s', parallel)

        # Verify if space is available in disk
        disk_free_mb = (disk.freespace(self.tmpdir) // 1024) // 1024
        if disk_free_mb < (est_size * self.sim_cps):
            self.cancel("Space not available to extract the %s linux tars\n"
                        "Mount and Use other partitions in dir_to_extract arg "
                        "to run the test" % self.sim_cps)
Example #28
    def setUp(self):
        """
        Downloads a copy of the linux kernel, calculates an estimated size of
        the uncompressed tarball, and uses this value to calculate the number of
        copies of the linux kernel that will be uncompressed.
        """
        self.nfail = 0
        self.tmpdir = data_dir.get_tmp_dir()
        self.tmpdir = self.params.get('dir_to_extract', default=self.tmpdir)
        tarball_base = self.params.get('tarball_base',
                                       default='linux-2.6.18.tar.bz2')
        kernel_repo = self.params.get('kernel_repo',
                                      default='http://www.kernel.org/pub/'
                                      'linux/kernel/v2.6')
        tarball_url = os.path.join(kernel_repo, tarball_base)
        tarball_md5 = self.params.get('tarball_md5',
                                      default='296a6d150d260144639c3664d127d1'
                                      '74')
        parallel = self.params.get('parallel', default=True)
        self.parallel = parallel
        self.log.info('Downloading linux kernel tarball')
        self.tarball = self.fetch_asset(tarball_url, asset_hash=tarball_md5,
                                        algorithm='md5')
        size_tarball = os.path.getsize(self.tarball) / 1024 / 1024

        # Estimation of the tarball size after uncompression
        compress_ratio = 5
        est_size = size_tarball * compress_ratio
        self.sim_cps = self.get_sim_cps(est_size)
        self.log.info('Source file: %s', tarball_base)
        self.log.info('Megabytes per copy: %s', size_tarball)
        self.log.info('Compress ratio: %s', compress_ratio)
        self.log.info('Estimated size after uncompression: %s', est_size)
        self.log.info('Number of copies: %s', self.sim_cps)
        self.log.info('Parallel: %s', parallel)

        # Verify if space is available in disk
        disk_free_mb = (disk.freespace(self.tmpdir) / 1024) / 1024
        if (disk_free_mb < est_size * self.sim_cps):
            self.cancel("Space not available to extract the %s linux tars\n"
                        "Mount and Use other partitions in dir_to_extract arg "
                        "to run the test" % self.sim_cps)
Example #29
 def setUp(self):
     sm = SoftwareManager()
     detected_distro = distro.detect()
     # Check for basic utilities
     self.tmpdir = data_dir.get_tmp_dir()
     self.build_dir = self.params.get('build_dir', default=self.tmpdir)
     for package in ['gcc', 'make', 'patch']:
         if not sm.check_installed(package) and not sm.install(package):
             self.cancel('%s is needed for the test to be run' % package)
     url = 'https://github.com/kdlucas/byte-unixbench/archive/master.zip'
     tarball = self.fetch_asset("byte-unixbench.zip", locations=[url],
                                expire='7d')
     archive.extract(tarball, self.srcdir)
     self.sourcedir = os.path.join(self.srcdir,
                                   "byte-unixbench-master/UnixBench")
     os.chdir(self.sourcedir)
     makefile_patch = 'patch -p1 < %s' % (os.path.join(
         self.datadir, 'Makefile.patch'))
     process.run(makefile_patch, shell=True)
     build.make(self.sourcedir)
Example #30
    def setUp(self):
        '''
        Sets all the required parameters and also
        mounts the tmpfs to be used in the test.
        '''

        # Set params as per available memory in system
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        free_mem = int(memory.freememtotal() / 1024)
        self.dd_timeout = 900

        # Set block size as hugepage size * 2
        self.block_size = (memory.get_huge_page_size() / 1024) * 2
        self.count = free_mem / self.block_size

        # Mount device as per free memory size
        os.mkdir(self.mem_path)
        self.device = Partition(device="none", mountpoint=self.mem_path)
        self.device.mount(mountpoint=self.mem_path, fstype="tmpfs",
                          args='-o size=%dM' % free_mem)
Example #31
 def test(self):
     test_dir = self.params.get('test_dir', default=data_dir.get_tmp_dir())
     # 4 different types of threads can be specified as args
     # These args can be given higher values to stress the system more
     # Here, the test is run with default args
     args = self.params.get('args', default='')
     args = ' -d %s %s ' % (test_dir, args)
     process.system("blogbench " + args, shell=True, sudo=True)
     report_path = os.path.join(self.logdir, 'stdout')
     with open(report_path, 'r') as f:
         file_buff = f.read().splitlines()
         for line in file_buff:
             if 'Final score for writes:' in line:
                 write_score = line.split()[4]
             if 'Final score for reads :' in line:
                 read_score = line.split()[5]
     self.log.info("The Benchmark Scores for Write and Read are : "
                   "%s  and %s\n " % (write_score, read_score))
     self.log.info("Please Check Logfile %s for more info of benchmark"
                   % report_path)
Example #32
 def test(self):
     test_dir = self.params.get('test_dir', default=data_dir.get_tmp_dir())
     # 4 different types of threads can be specified as args
     # These args can be given higher values to stress the system more
     # Here, the test is run with default args
     args = self.params.get('args', default='')
     args = ' -d %s %s ' % (test_dir, args)
     process.system("blogbench " + args, shell=True, sudo=True)
     report_path = os.path.join(self.logdir, 'stdout')
     with open(report_path, 'r') as f:
         file_buff = f.read().splitlines()
         for line in file_buff:
             if 'Final score for writes:' in line:
                 write_score = line.split()[4]
             if 'Final score for reads :' in line:
                 read_score = line.split()[5]
     self.log.info("The Benchmark Scores for Write and Read are : "
                   "%s  and %s\n " % (write_score, read_score))
     self.log.info("Please Check Logfile %s for more info of benchmark" %
                   report_path)
Example #33
def get_tmp_dir(public=True):
    """
    Get the most appropriate tmp dir location.

    :param public: If public for all users' access
    """
    tmp_dir = None
    # apparmor denies /tmp/* and /var/tmp/*, which causes failures across tests;
    # it is better to handle that here
    if distro.detect().name == 'Ubuntu':
        tmp_dir = "/var/lib/libvirt/images"
        if not utils_path.usable_rw_dir(tmp_dir):
            logging.warning("Unable to write in '/var/lib/libvirt/images' "
                            "on Ubuntu, apparmor might complain...")
            tmp_dir = None
    tmp_dir = data_dir.get_tmp_dir(basedir=tmp_dir)
    if public:
        tmp_dir_st = os.stat(tmp_dir)
        os.chmod(tmp_dir, tmp_dir_st.st_mode | stat.S_IXUSR |
                 stat.S_IXGRP | stat.S_IXOTH | stat.S_IRGRP | stat.S_IROTH)
    return tmp_dir
Example #34
    def setUp(self):
        '''
        Sets required params for dd workload and mounts the tmpfs
        '''

        # Get required mem info
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        self.block_size = int(mmap.PAGESIZE) // 1024
        # add mount point
        if not os.path.exists(self.mem_path):
            os.makedirs(self.mem_path)
        self.device = Partition(device="none", mountpoint=self.mem_path)
        self.device.mount(mountpoint=self.mem_path,
                          fstype="tmpfs",
                          mnt_check=False)
        free_space = (disk.freespace(self.mem_path)) // 1024
        # Reserve some memory (keep 25% of the free space unused)
        res_free_space = int(free_space / 4)
        free_space = free_space - res_free_space
        # Leaving out some free space in tmpfs
        self.count = (free_space // self.block_size) - 3
Example #35
    def __init__(self,
                 methodName='runTest',
                 name=None,
                 params=None,
                 base_logdir=None,
                 tag=None,
                 job=None,
                 runner_queue=None):
        """
        Initializes the test.

        :param methodName: Name of the main method to run. For the sake of
                           compatibility with the original unittest class,
                           you should not set this.
        :param name: Pretty name of the test name. For normal tests, written
                     with the avocado API, this should not be set, this is
                     reserved for running random executables as tests.
        :param base_logdir: Directory where test logs should go. If None
                            provided, it'll use
                            :func:`avocado.core.data_dir.get_job_logs_dir`.
        :param tag: Tag that differentiates 2 executions of the same test name.
                    Example: 'long', 'short', so we can differentiate
                    'sleeptest.long' and 'sleeptest.short'.
        :param job: The job that this test is part of.
        """
        def record_and_warn(*args, **kwargs):
            """ Record call to this function and log warning """
            self.__log_warn_used = True
            return original_log_warn(*args, **kwargs)

        if name is not None:
            self.name = name
        else:
            self.name = self.__class__.__name__

        if params is None:
            params = {}
        self.params = Params(params)
        self._raw_params = params

        self.tag = tag or self.params.get('tag')
        self.job = job

        basename = os.path.basename(self.name)

        tmpdir = data_dir.get_tmp_dir()

        self.basedir = os.path.dirname(inspect.getfile(self.__class__))
        self.datadir = os.path.join(self.basedir, '%s.data' % basename)

        self.expected_stdout_file = os.path.join(self.datadir,
                                                 'stdout.expected')
        self.expected_stderr_file = os.path.join(self.datadir,
                                                 'stderr.expected')

        self.workdir = utils_path.init_dir(tmpdir, basename)
        self.srcdir = utils_path.init_dir(self.workdir, 'src')
        if base_logdir is None:
            base_logdir = data_dir.get_job_logs_dir()
        base_logdir = os.path.join(base_logdir, 'test-results')
        self.tagged_name = self.get_tagged_name(base_logdir)

        # Let's avoid trouble at logdir init time, since we're interested
        # in a relative directory here
        tagged_name = self.tagged_name
        if tagged_name.startswith('/'):
            tagged_name = tagged_name[1:]

        self.logdir = utils_path.init_dir(base_logdir, tagged_name)
        io.set_log_file_dir(self.logdir)
        self.logfile = os.path.join(self.logdir, 'debug.log')

        self.stdout_file = os.path.join(self.logdir, 'stdout')
        self.stderr_file = os.path.join(self.logdir, 'stderr')

        self.outputdir = utils_path.init_dir(self.logdir, 'data')
        self.sysinfodir = utils_path.init_dir(self.logdir, 'sysinfo')
        self.sysinfo_logger = sysinfo.SysInfo(basedir=self.sysinfodir)

        self.log = logging.getLogger("avocado.test")
        original_log_warn = self.log.warning
        self.__log_warn_used = False
        self.log.warn = self.log.warning = record_and_warn

        self.stdout_log = logging.getLogger("avocado.test.stdout")
        self.stderr_log = logging.getLogger("avocado.test.stderr")

        self.log.info('START %s', self.tagged_name)
        self.log.debug('')
        self.log.debug('Test instance parameters:')

        # Set the helper set_default to the params object
        setattr(self.params, 'set_default', self._set_default)

        # Apply what comes from the params dict
        for key in sorted(self.params.keys()):
            self.log.debug('    %s = %s', key, self.params.get(key))
        self.log.debug('')

        # Apply what comes from the default_params dict
        self.log.debug('Default parameters:')
        for key in sorted(self.default_params.keys()):
            self.log.debug('    %s = %s', key, self.default_params.get(key))
            self.params.set_default(key, self.default_params[key])
        self.log.debug('')
        self.log.debug(
            'Test instance params override defaults whenever available')
        self.log.debug('')

        # If there's a timeout set, log a timeout reminder
        if self.params.timeout:
            self.log.info(
                'Test timeout set. Will wait %.2f s for '
                'PID %s to end', float(self.params.timeout), os.getpid())
            self.log.info('')

        self.debugdir = None
        self.resultsdir = None
        self.status = None
        self.fail_reason = None
        self.fail_class = None
        self.traceback = None
        self.text_output = None

        self.whiteboard = ''

        self.running = False
        self.time_start = None
        self.time_end = None
        self.paused = False
        self.paused_msg = ''

        self.runner_queue = runner_queue

        self.time_elapsed = None
        unittest.TestCase.__init__(self)
Example #36
File: test.py  Project: eduardok/avocado
    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, tag=None, job=None):
        """
        Initializes the test.

        :param methodName: Name of the main method to run. For the sake of
                           compatibility with the original unittest class,
                           you should not set this.
        :param name: Pretty name of the test name. For normal tests, written
                     with the avocado API, this should not be set, this is
                     reserved for running random executables as tests.
        :param base_logdir: Directory where test logs should go. If None
                            provided, it'll use
                            :func:`avocado.core.data_dir.get_job_logs_dir`.
        :param tag: Tag that differentiates 2 executions of the same test name.
                    Example: 'long', 'short', so we can differentiate
                    'sleeptest.long' and 'sleeptest.short'.
        :param job: The job that this test is part of.
        """
        if name is not None:
            self.name = name
        else:
            self.name = self.__class__.__name__

        if params is None:
            params = {}
        self.params = Params(params)
        self._raw_params = params

        shortname = self.params.get('shortname')
        s_tag = None
        if shortname:
            split_shortname = shortname.split('.')
            if len(split_shortname) > 1:
                s_tag = ".".join(split_shortname[1:])
        self.tag = tag or s_tag
        self.job = job
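        # Resolve per-test directories: basedir/depsdir under the tests dir,
        # workdir/srcdir under the avocado tmp dir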
        self.basedir = os.path.join(data_dir.get_test_dir(), self.name)
        self.depsdir = os.path.join(self.basedir, 'deps')
        self.workdir = os.path.join(data_dir.get_tmp_dir(), self.name)
        if not os.path.isdir(self.workdir):
            os.makedirs(self.workdir)
        self.srcdir = os.path.join(self.workdir, 'src')
        if not os.path.isdir(self.srcdir):
            os.makedirs(self.srcdir)
        if base_logdir is None:
            base_logdir = data_dir.get_job_logs_dir()
        self.tagged_name = self.get_tagged_name(base_logdir)
        self.logdir = os.path.join(base_logdir, self.tagged_name)
        if not os.path.isdir(self.logdir):
            os.makedirs(self.logdir)
        self.logfile = os.path.join(self.logdir, 'debug.log')
        self.sysinfodir = os.path.join(self.logdir, 'sysinfo')

        self.log = logging.getLogger("avocado.test")

        self.log.info('START %s', self.tagged_name)
        self.log.debug('')
        self.log.debug('Test instance parameters:')

        # Set the helper set_default to the params object
        setattr(self.params, 'set_default', self._set_default)

        # Apply what comes from the params dict
        for key in sorted(self.params.keys()):
            self.log.debug('    %s = %s', key, self.params.get(key))
            setattr(self.params, key, self.params.get(key))
        self.log.debug('')

        # Apply what comes from the default_params dict
        self.log.debug('Default parameters:')
        for key in sorted(self.default_params.keys()):
            self.log.debug('    %s = %s', key, self.default_params.get(key))
            self.params.set_default(key, self.default_params[key])
        self.log.debug('')
        self.log.debug('Test instance params override defaults whenever available')
        self.log.debug('')

        # If there's a timeout set, log a timeout reminder
        if hasattr(self.params, 'timeout'):
            self.log.info('Test timeout set. Will wait %.2f s for '
                          'PID %s to end',
                          float(self.params.timeout), os.getpid())
            self.log.info('')

        self.debugdir = None
        self.resultsdir = None
        self.status = None
        self.fail_reason = None
        self.fail_class = None
        self.traceback = None
        self.text_output = None

        self.time_elapsed = None
        unittest.TestCase.__init__(self)
Example #37
File: test.py  Project: FengYang/avocado
    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, tag=None, job=None, runner_queue=None):
        """
        Initializes the test.

        :param methodName: Name of the main method to run. For the sake of
                           compatibility with the original unittest class,
                           you should not set this.
        :param name: Pretty name of the test name. For normal tests, written
                     with the avocado API, this should not be set, this is
                     reserved for running random executables as tests.
        :param base_logdir: Directory where test logs should go. If None
                            provided, it'll use
                            :func:`avocado.core.data_dir.get_job_logs_dir`.
        :param tag: Tag that differentiates 2 executions of the same test name.
                    Example: 'long', 'short', so we can differentiate
                    'sleeptest.long' and 'sleeptest.short'.
        :param job: The job that this test is part of.
        """
        if name is not None:
            self.name = name
        else:
            self.name = self.__class__.__name__

        if params is None:
            params = {}
        self.params = Params(params)
        self._raw_params = params

        self.tag = tag or self.params.get('tag')
        self.job = job

        basename = os.path.basename(self.name)

        if job is not None:
            tmpdir = tempfile.mkdtemp(dir=data_dir.get_tmp_dir(),
                                      prefix='job-%s-' % job.unique_id)
        else:
            tmpdir = tempfile.mkdtemp(dir=data_dir.get_tmp_dir())

        self.basedir = os.path.dirname(inspect.getfile(self.__class__))
        self.datadir = os.path.join(self.basedir, '%s.data' % basename)

        self.expected_stdout_file = os.path.join(self.datadir,
                                                 'stdout.expected')
        self.expected_stderr_file = os.path.join(self.datadir,
                                                 'stderr.expected')

        self.workdir = path.init_dir(tmpdir, basename)
        self.srcdir = path.init_dir(self.workdir, 'src')
        if base_logdir is None:
            base_logdir = data_dir.get_job_logs_dir()
        base_logdir = os.path.join(base_logdir, 'test-results')
        self.tagged_name = self.get_tagged_name(base_logdir)

        self.logdir = path.init_dir(base_logdir, self.tagged_name)
        io.set_log_file_dir(self.logdir)
        self.logfile = os.path.join(self.logdir, 'debug.log')

        self.stdout_file = os.path.join(self.logdir, 'stdout')
        self.stderr_file = os.path.join(self.logdir, 'stderr')

        self.outputdir = path.init_dir(self.logdir, 'data')
        self.sysinfodir = path.init_dir(self.logdir, 'sysinfo')
        self.sysinfo_logger = sysinfo.SysInfo(basedir=self.sysinfodir)

        self.log = logging.getLogger("avocado.test")

        self.stdout_log = logging.getLogger("avocado.test.stdout")
        self.stderr_log = logging.getLogger("avocado.test.stderr")

        self.log.info('START %s', self.tagged_name)
        self.log.debug('')
        self.log.debug('Test instance parameters:')

        # Set the helper set_default to the params object
        setattr(self.params, 'set_default', self._set_default)

        # Apply what comes from the params dict
        for key in sorted(self.params.keys()):
            self.log.debug('    %s = %s', key, self.params.get(key))
        self.log.debug('')

        # Apply what comes from the default_params dict
        self.log.debug('Default parameters:')
        for key in sorted(self.default_params.keys()):
            self.log.debug('    %s = %s', key, self.default_params.get(key))
            self.params.set_default(key, self.default_params[key])
        self.log.debug('')
        self.log.debug('Test instance params override defaults whenever available')
        self.log.debug('')

        # If there's a timeout set, log a timeout reminder
        if self.params.timeout:
            self.log.info('Test timeout set. Will wait %.2f s for '
                          'PID %s to end',
                          float(self.params.timeout), os.getpid())
            self.log.info('')

        self.debugdir = None
        self.resultsdir = None
        self.status = None
        self.fail_reason = None
        self.fail_class = None
        self.traceback = None
        self.text_output = None

        self.whiteboard = ''

        self.running = False
        self.time_start = None
        self.time_end = None
        self.paused = False
        self.paused_msg = ''

        self.runner_queue = runner_queue

        self.time_elapsed = None
        unittest.TestCase.__init__(self)
Example #38
def get_tmp_dir():
    return data_dir.get_tmp_dir()
Example #39
File: test.py  Project: PyLearner/avocado
    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, tag=None, job=None, runner_queue=None):
        """
        Initializes the test.

        :param methodName: Name of the main method to run. For the sake of
                           compatibility with the original unittest class,
                           you should not set this.
        :param name: Pretty name of the test name. For normal tests, written
                     with the avocado API, this should not be set, this is
                     reserved for running random executables as tests.
        :param base_logdir: Directory where test logs should go. If None
                            provided, it'll use
                            :func:`avocado.core.data_dir.create_job_logs_dir`.
        :param tag: Tag that differentiates 2 executions of the same test name.
                    Example: 'long', 'short', so we can differentiate
                    'sleeptest.long' and 'sleeptest.short'.
        :param job: The job that this test is part of.
        """
        def record_and_warn(*args, **kwargs):
            """ Record call to this function and log warning """
            if not self.__log_warn_used:
                self.__log_warn_used = True
            return original_log_warn(*args, **kwargs)

        if name is not None:
            self.name = name
        else:
            self.name = self.__class__.__name__

        self.tag = tag or None

        self.job = job

        basename = os.path.basename(self.name)

        tmpdir = data_dir.get_tmp_dir()

        self.filename = inspect.getfile(self.__class__).rstrip('co')
        self.basedir = os.path.dirname(self.filename)
        self.datadir = self.filename + '.data'

        self.expected_stdout_file = os.path.join(self.datadir,
                                                 'stdout.expected')
        self.expected_stderr_file = os.path.join(self.datadir,
                                                 'stderr.expected')

        self.workdir = utils_path.init_dir(tmpdir, basename)
        self.srcdir = utils_path.init_dir(self.workdir, 'src')
        if base_logdir is None:
            base_logdir = data_dir.create_job_logs_dir()
        base_logdir = os.path.join(base_logdir, 'test-results')
        self.tagged_name = self.get_tagged_name(base_logdir)

        # Let's avoid trouble at logdir init time, since we're interested
        # in a relative directory here
        tagged_name = self.tagged_name
        if tagged_name.startswith('/'):
            tagged_name = tagged_name[1:]

        self.logdir = utils_path.init_dir(base_logdir, tagged_name)
        genio.set_log_file_dir(self.logdir)
        self.logfile = os.path.join(self.logdir, 'debug.log')

        self.stdout_file = os.path.join(self.logdir, 'stdout')
        self.stderr_file = os.path.join(self.logdir, 'stderr')

        self.outputdir = utils_path.init_dir(self.logdir, 'data')
        self.sysinfodir = utils_path.init_dir(self.logdir, 'sysinfo')
        self.sysinfo_logger = sysinfo.SysInfo(basedir=self.sysinfodir)

        self.log = logging.getLogger("avocado.test")
        original_log_warn = self.log.warning
        self.__log_warn_used = False
        self.log.warn = self.log.warning = record_and_warn

        self.stdout_log = logging.getLogger("avocado.test.stdout")
        self.stderr_log = logging.getLogger("avocado.test.stderr")

        mux_entry = ['/test/*']
        if isinstance(params, dict):
            self.default_params = self.default_params.copy()
            self.default_params.update(params)
            params = []
        elif params is None:
            params = []
        elif isinstance(params, tuple):
            params, mux_entry = params[0], params[1]
        self.params = multiplexer.AvocadoParams(params, self.name, self.tag,
                                                mux_entry,
                                                self.default_params)

        self.log.info('START %s', self.tagged_name)
        self.log.debug('')

        self.debugdir = None
        self.resultsdir = None
        self.status = None
        self.fail_reason = None
        self.fail_class = None
        self.traceback = None
        self.text_output = None

        self.whiteboard = ''

        self.running = False
        self.time_start = None
        self.time_end = None
        self.paused = False
        self.paused_msg = ''

        self.runner_queue = runner_queue

        self.time_elapsed = None
        unittest.TestCase.__init__(self, methodName=methodName)
Example #40
0
def run(test, params, env):
    """
    Test remote access with TCP, TLS connection
    """
    test_dict = dict(params)
    vm_name = test_dict.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = test_dict.get("start_vm", "no")

    # Server and client parameters
    server_ip = test_dict.get("server_ip")
    server_user = test_dict.get("server_user")
    server_pwd = test_dict.get("server_pwd")
    client_ip = test_dict.get("client_ip")
    client_user = test_dict.get("client_user")
    client_pwd = test_dict.get("client_pwd")
    server_cn = test_dict.get("server_cn")
    client_cn = test_dict.get("client_cn")
    target_ip = test_dict.get("target_ip", "")
    # generate remote IP
    if target_ip == "":
        if server_cn:
            target_ip = server_cn
        elif server_ip:
            target_ip = server_ip
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    # Ceph disk parameters
    driver = test_dict.get("test_driver", "qemu")
    transport = test_dict.get("transport")
    plus = test_dict.get("conn_plus", "+")
    source_type = test_dict.get("vm_disk_source_type", "file")
    virsh_options = test_dict.get("virsh_options", "--verbose --live")
    vol_name = test_dict.get("vol_name")
    disk_src_protocol = params.get("disk_source_protocol")
    source_file = test_dict.get("disk_source_file")
    disk_format = test_dict.get("disk_format", "qcow2")
    mon_host = params.get("mon_host")
    ceph_key_opt = ""
    attach_disk = False
    # Disk XML file
    disk_xml = None
    # Define ceph_disk conditional variable
    ceph_disk = "yes" == test_dict.get("ceph_disk")

    # Append postcopy options (e.g. --postcopy) to the virsh options if requested
    postcopy_options = test_dict.get("postcopy_options")
    if postcopy_options and not virsh_options.count(postcopy_options):
        virsh_options = "%s %s" % (virsh_options, postcopy_options)
        test_dict['virsh_options'] = virsh_options

    # For bi-directional and tls reverse test
    uri_port = test_dict.get("uri_port", ":22")
    uri_path = test_dict.get("uri_path", "/system")
    src_uri = test_dict.get("migration_source_uri", "qemu:///system")
    uri = "%s%s%s://%s%s%s" % (driver, plus, transport, target_ip, uri_port,
                               uri_path)
    test_dict["desuri"] = uri
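    # For example, with driver="qemu", plus="+", transport="tls" and
    # target_ip="dst.example.com" (a hypothetical host), the destination URI
    # becomes "qemu+tls://dst.example.com:22/system"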

    # Make sure all parameters are assigned a valid value
    check_parameters(test, test_dict)
    # Verify SSH login to the remote host
    remote_session = remote.wait_for_login('ssh', server_ip, '22', server_user,
                                           server_pwd, r"[\#\$]\s*$")
    remote_session.close()

    # Set up remote SSH key and remote /etc/hosts file for bi-directional migration
    migrate_vm_back = "yes" == test_dict.get("migrate_vm_back", "no")
    if migrate_vm_back:
        ssh_key.setup_remote_ssh_key(server_ip, server_user, server_pwd)
        ssh_key.setup_remote_known_hosts_file(client_ip, server_ip,
                                              server_user, server_pwd)
    # Reset VM state if needed
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Setup migration context
    migrate_setup = migration.MigrationTest()
    migrate_setup.migrate_pre_setup(test_dict["desuri"], params)

    # Install ceph-common on remote host machine.
    remote_ssh_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r"[\#\$]\s*$")
    if not utils_package.package_install(["ceph-common"], remote_ssh_session):
        test.error("Failed to install required packages on remote host")
    remote_ssh_session.close()
    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # Record the initial SELinux mode on both local and remote hosts
        LOCAL_SELINUX_ENFORCING_STATUS = utils_selinux.get_status()
        logging.info("Previous local SELinux mode: %s",
                     LOCAL_SELINUX_ENFORCING_STATUS)
        cmd_result = remote.run_remote_cmd('getenforce', params,
                                           runner_on_target)
        REMOTE_SELINUX_ENFORCING_STATUS = cmd_result.stdout_text.strip()
        logging.info("Previous remote SELinux mode: %s",
                     REMOTE_SELINUX_ENFORCING_STATUS)

        if ceph_disk:
            logging.info("Put local SELinux in permissive mode when testing "
                         "ceph migration")
            utils_selinux.set_status("permissive")

            logging.info("Put remote SELinux in permissive mode")
            cmd = "setenforce permissive"
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = cmd_result.exit_status, cmd_result.stdout_text.strip()
            if status:
                test.error("Failed to set remote SELinux to permissive mode")

            # Prepare ceph disk.
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            test_dict['key_file'] = key_file
            test_dict['first_disk'] = vm.get_first_disk_devices()
            ceph_key_opt, secret_uuid = prepare_ceph_disk(
                test_dict, remote_virsh_dargs, test, runner_on_target)
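            # prepare_ceph_disk (defined elsewhere in this test module) is
            # assumed to create the RBD image and the libvirt secret, returning
            # the ceph auth option string and the secret UUID used below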
            host_ip = test_dict.get('mon_host')
            disk_image = test_dict.get('disk_img')

            # Build auth information.
            auth_attrs = {}
            auth_attrs['auth_user'] = params.get("auth_user")
            auth_attrs['secret_type'] = params.get("secret_type")
            auth_attrs['secret_uuid'] = secret_uuid
            build_disk_xml(vm_name,
                           disk_format,
                           host_ip,
                           disk_src_protocol,
                           vol_name,
                           disk_image,
                           auth=auth_attrs)

            vm_xml_cxt = process.run("virsh dumpxml %s" % vm_name,
                                     shell=True).stdout_text
            logging.debug("The VM XML with ceph disk source: \n%s", vm_xml_cxt)
            try:
                if vm.is_dead():
                    vm.start()
            except virt_vm.VMStartError as detail:
                test.fail("Failed to start VM %s: %s" % (vm_name, detail))

        # Ensure the same VM name doesn't exist on remote host before migrating.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        # Trigger migration
        migrate_vm(test, test_dict)

        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()
            # Pre migration setup for local machine
            migrate_setup.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = cmd_result.exit_status, cmd_result.stdout_text.strip()
            logging.info(output)
            if status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target)
                test.fail("Failed to run '%s' on remote: %s" % (cmd, output))
    finally:
        logging.info("Recover the test environment")
        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True)
        # Ensure the VM can be cleaned up on the remote host even if migration fails.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        logging.info("Restore the VM XML configuration")
        vmxml_backup.sync()
        logging.debug("The current VM XML:\n%s", vmxml_backup.xmltreefile)

        # Clean up ceph environment.
        if disk_src_protocol == "rbd":
            # Clean up secret
            secret_list = get_secret_list()
            if secret_list:
                for secret_uuid in secret_list:
                    virsh.secret_undefine(secret_uuid)
            # Clean up dirty secrets on the remote host if the test involves ceph auth.
            client_name = test_dict.get('client_name')
            client_key = test_dict.get("client_key")
            if client_name and client_key:
                try:
                    remote_virsh = virsh.VirshPersistent(**remote_virsh_dargs)
                    remote_dirty_secret_list = get_secret_list(remote_virsh)
                    for dirty_secret_uuid in remote_dirty_secret_list:
                        remote_virsh.secret_undefine(dirty_secret_uuid)
                except (process.CmdError, remote.SCPError) as detail:
                    test.error(detail)
                finally:
                    remote_virsh.close_session()
            # Delete the disk if it exists.
            disk_src_name = "%s/%s" % (vol_name, test_dict.get('disk_img'))
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, ceph_key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        if LOCAL_SELINUX_ENFORCING_STATUS:
            logging.info("Restore local SELinux to its original mode")
            utils_selinux.set_status(LOCAL_SELINUX_ENFORCING_STATUS)
        if REMOTE_SELINUX_ENFORCING_STATUS:
            logging.info("Restore remote SELinux to its original mode")
            cmd = "setenforce %s" % REMOTE_SELINUX_ENFORCING_STATUS
            remote.run_remote_cmd(cmd, params, runner_on_target)

        # Remove known hosts on local host
        cmd = "ssh-keygen -R %s" % server_ip
        process.run(cmd, ignore_status=True, shell=True)

        # Remove known hosts on remote host
        cmd = "ssh-keygen -R %s" % client_ip
        remote.run_remote_cmd(cmd, params, runner_on_target)
Example #41
0
    def __init__(self,
                 methodName='runTest',
                 name=None,
                 params=None,
                 base_logdir=None,
                 tag=None,
                 job=None,
                 runner_queue=None):
        """
        Initializes the test.

        :param methodName: Name of the main method to run. For the sake of
                           compatibility with the original unittest class,
                           you should not set this.
        :param name: Pretty name of the test name. For normal tests, written
                     with the avocado API, this should not be set, this is
                     reserved for running random executables as tests.
        :param base_logdir: Directory where test logs should go. If None
                            provided, it'll use
                            :func:`avocado.core.data_dir.create_job_logs_dir`.
        :param tag: Tag that differentiates 2 executions of the same test name.
                    Example: 'long', 'short', so we can differentiate
                    'sleeptest.long' and 'sleeptest.short'.
        :param job: The job that this test is part of.
        """
        def record_and_warn(*args, **kwargs):
            """ Record call to this function and log warning """
            if not self.__log_warn_used:
                self.__log_warn_used = True
            return original_log_warn(*args, **kwargs)

        if name is not None:
            self.name = name
        else:
            self.name = self.__class__.__name__

        self.tag = tag or None

        self.job = job

        basename = os.path.basename(self.name)

        tmpdir = data_dir.get_tmp_dir()

        self.filename = inspect.getfile(self.__class__).rstrip('co')
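        # NOTE: rstrip('co') trims trailing 'c'/'o' characters, so a cached
        # '.pyc'/'.pyo' path is mapped back to the '.py' source file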
        self.basedir = os.path.dirname(self.filename)
        self.datadir = self.filename + '.data'

        self.expected_stdout_file = os.path.join(self.datadir,
                                                 'stdout.expected')
        self.expected_stderr_file = os.path.join(self.datadir,
                                                 'stderr.expected')

        self.workdir = utils_path.init_dir(tmpdir, basename)
        self.srcdir = utils_path.init_dir(self.workdir, 'src')
        if base_logdir is None:
            base_logdir = data_dir.create_job_logs_dir()
        base_logdir = os.path.join(base_logdir, 'test-results')
        self.tagged_name = self.get_tagged_name(base_logdir)

        # Let's avoid trouble at logdir init time, since we're interested
        # in a relative directory here
        tagged_name = self.tagged_name
        if tagged_name.startswith('/'):
            tagged_name = tagged_name[1:]

        self.logdir = utils_path.init_dir(base_logdir, tagged_name)
        genio.set_log_file_dir(self.logdir)
        self.logfile = os.path.join(self.logdir, 'debug.log')

        self.stdout_file = os.path.join(self.logdir, 'stdout')
        self.stderr_file = os.path.join(self.logdir, 'stderr')

        self.outputdir = utils_path.init_dir(self.logdir, 'data')
        self.sysinfodir = utils_path.init_dir(self.logdir, 'sysinfo')
        self.sysinfo_logger = sysinfo.SysInfo(basedir=self.sysinfodir)

        self.log = logging.getLogger("avocado.test")
        original_log_warn = self.log.warning
        self.__log_warn_used = False
        self.log.warn = self.log.warning = record_and_warn
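        # Both .warn and .warning are routed through record_and_warn, so any
        # warning emitted during the test is flagged in self.__log_warn_used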

        self.stdout_log = logging.getLogger("avocado.test.stdout")
        self.stderr_log = logging.getLogger("avocado.test.stderr")

        mux_entry = ['/test/*']
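        # 'params' may be a dict (merged into default_params), None, or a
        # (params, mux_entry) tuple coming from the multiplexer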
        if isinstance(params, dict):
            self.default_params = self.default_params.copy()
            self.default_params.update(params)
            params = []
        elif params is None:
            params = []
        elif isinstance(params, tuple):
            params, mux_entry = params[0], params[1]
        self.params = multiplexer.AvocadoParams(params, self.name, self.tag,
                                                mux_entry, self.default_params)

        self.log.info('START %s', self.tagged_name)
        self.log.debug('')

        self.debugdir = None
        self.resultsdir = None
        self.status = None
        self.fail_reason = None
        self.fail_class = None
        self.traceback = None
        self.text_output = None

        self.whiteboard = ''

        self.running = False
        self.time_start = None
        self.time_end = None
        self.paused = False
        self.paused_msg = ''

        self.runner_queue = runner_queue

        self.time_elapsed = None
        unittest.TestCase.__init__(self, methodName=methodName)