Example #1
def run_clock_getres(test, params, env):
    """
    Verify if guests using kvm-clock as the time source have a sane clock
    resolution.

    @param test: kvm test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    t_name = "test_clock_getres"
    base_dir = "/tmp"

    deps_dir = os.path.join(test.bindir, "deps", t_name)
    os.chdir(deps_dir)
    try:
        utils.system("make clean")
        utils.system("make")
    except:
        raise error.TestError("Failed to compile %s" % t_name)

    test_clock = os.path.join(deps_dir, t_name)
    if not os.path.isfile(test_clock):
        raise error.TestError("Could not find %s" % t_name)

    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
    timeout = int(params.get("login_timeout", 360))
    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
    if not vm.copy_files_to(test_clock, base_dir):
        raise error.TestError("Failed to copy %s to VM" % t_name)
    session.cmd(os.path.join(base_dir, t_name))
    logging.info("PASS: Guest reported appropriate clock resolution")
    logging.info("guest's dmesg:")
    session.cmd_output("dmesg")
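The actual resolution check happens inside the compiled test_clock_getres helper, which is not shown in this listing. For context only, the same kind of check can be sketched in a few lines of Python 3.3+ run on the guest; the threshold below is an illustrative assumption, not the helper's exact pass criterion.

import time

# Query the resolution of the monotonic clock, which on a KVM guest is
# typically backed by kvm-clock.
res = time.clock_getres(time.CLOCK_MONOTONIC)

# A "sane" resolution should be well below one millisecond; the exact
# threshold used by the C helper may differ (assumption).
assert res <= 1e-3, "clock resolution too coarse: %g s" % res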
Example #2
def restart(report_stop_failure=False):
    """Restart the session manager.

    - If the user is logged in, the session will be terminated.
    - If the UI is currently down, just go ahead and bring it up unless the
      caller has requested that a failure to stop be reported.
    - To ensure all processes are up and ready, this function will wait
      for the login prompt to show up and be marked as visible.

    @param report_stop_failure: False by default, set to True if you care about
                                the UI being up at the time of call and
                                successfully torn down by this call.
    """
    session = get_chrome_session_ident()

    # Log what we're about to do to /var/log/messages. Used to log crashes later
    # in cleanup by cros_ui_test.UITest.
    utils.system('logger "%s"' % UI_RESTART_ATTEMPT_MSG)

    try:
        if stop(allow_fail=not report_stop_failure) != 0:
            raise error.TestError('Could not stop session')
        start(wait_for_login_prompt=False)
        # Wait for login prompt to appear to indicate that all processes are
        # up and running again.
        wait_for_chrome_ready(session)
    finally:
        utils.system('logger "%s"' % UI_RESTART_COMPLETE_MSG)
Example #3
File: qemu_img.py Project: ceph/autotest
    def check_test(cmd):
        """
        Subcommand 'qemu-img check' test.

        This test uses 'dd' to create a file of the specified size and checks
        it, then converts it to each supported image_format in a loop and
        checks it again.

        @param cmd: qemu-img base command.
        """
        test_image = kvm_utils.get_path(test.bindir,
                                        params.get("image_name_dd"))
        print "test_image = %s" % test_image
        create_image_cmd = params.get("create_image_cmd")
        create_image_cmd = create_image_cmd % test_image
        print "create_image_cmd = %s" % create_image_cmd
        utils.system(create_image_cmd)
        s, o = _check(cmd, test_image)
        if not s:
            raise error.TestFail("Check image '%s' failed with error: %s" %
                                                           (test_image, o))
        for fmt in params.get("supported_image_formats").split():
            output_image = test_image + ".%s" % fmt
            _convert(cmd, fmt, test_image, output_image)
            s, o = _check(cmd, output_image)
            if not s:
                raise error.TestFail("Check image '%s' got error: %s" %
                                                     (output_image, o))
            os.remove(output_image)
        os.remove(test_image)
Example #4
    def check_test(cmd):
        """
        Subcommand 'qemu-img check' test.

        This test uses 'dd' to create a file of the specified size and checks
        it, then converts it to each supported image_format in a loop and
        checks it again.

        @param cmd: qemu-img base command.
        """
        test_image = kvm_utils.get_path(test.bindir,
                                        params.get("image_name_dd"))
        print "test_image = %s" % test_image
        create_image_cmd = params.get("create_image_cmd")
        create_image_cmd = create_image_cmd % test_image
        print "create_image_cmd = %s" % create_image_cmd
        utils.system(create_image_cmd)
        s, o = _check(cmd, test_image)
        if not s:
            raise error.TestFail("Check image '%s' failed with error: %s" %
                                 (test_image, o))
        for fmt in params.get("supported_image_formats").split():
            output_image = test_image + ".%s" % fmt
            _convert(cmd, fmt, test_image, output_image)
            s, o = _check(cmd, output_image)
            if not s:
                raise error.TestFail("Check image '%s' got error: %s" %
                                     (output_image, o))
            os.remove(output_image)
        os.remove(test_image)
Example #5
    def _convert(cmd,
                 output_fmt,
                 img_name,
                 output_filename,
                 fmt=None,
                 compressed="no",
                 encrypted="no"):
        """
        Simple wrapper of 'qemu-img convert' function.

        @param cmd: qemu-img base command.
        @param output_fmt: the output format of the converted image
        @param img_name: name of the image to be converted
        @param output_filename: name of the converted output image
        @param fmt: format of the source image
        @param compressed: whether the output image is compressed
        @param encrypted: whether the output image is encrypted
        """
        cmd += " convert"
        if compressed == "yes":
            cmd += " -c"
        if encrypted == "yes":
            cmd += " -e"
        if fmt:
            cmd += " -f %s" % fmt
        cmd += " -O %s" % output_fmt
        cmd += " %s %s" % (img_name, output_filename)
        logging.info("Converting '%s' from format '%s' to '%s'", img_name, fmt,
                     output_fmt)
        utils.system(cmd)
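A quick way to see what this wrapper actually runs is to assemble the same command string without executing it. The helper name and argument values below are illustrative, not part of autotest:

def build_convert_cmd(qemu_img, output_fmt, img_name, output_filename,
                      fmt=None, compressed=False, encrypted=False):
    # Mirrors the flag handling of _convert() above, but returns the string
    # instead of passing it to utils.system(), so it can be inspected.
    parts = [qemu_img, "convert"]
    if compressed:
        parts.append("-c")
    if encrypted:
        parts.append("-e")
    if fmt:
        parts += ["-f", fmt]
    parts += ["-O", output_fmt, img_name, output_filename]
    return " ".join(parts)

assert build_convert_cmd("qemu-img", "raw", "disk.qcow2", "disk.raw",
                         fmt="qcow2", compressed=True) == \
    "qemu-img convert -c -f qcow2 -O raw disk.qcow2 disk.raw"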
Example #6
File: qemu_img.py Project: ceph/autotest
    def _create(cmd, img_name, fmt, img_size=None, base_img=None,
               base_img_fmt=None, encrypted="no"):
        """
        Simple wrapper of 'qemu-img create'

        @param cmd: qemu-img base command.
        @param img_name: name of the image file
        @param fmt: image format
        @param img_size: image size
        @param base_img: base image, when creating a snapshot image
        @param base_img_fmt: base image format, when creating a snapshot image
        @param encrypted: indicates whether the created image is encrypted
        """
        cmd += " create"
        if encrypted == "yes":
            cmd += " -e"
        if base_img:
            cmd += " -b %s" % base_img
            if base_img_fmt:
                cmd += " -F %s" % base_img_fmt
        cmd += " -f %s" % fmt
        cmd += " %s" % img_name
        if img_size:
            cmd += " %s" % img_size
        utils.system(cmd)
Example #7
def ensure_running(service_name):
    """Fails if |service_name| is not running.

    @param service_name: name of the service.
    """
    cmd = 'initctl status %s | grep start/running' % service_name
    utils.system(cmd)
Example #8
    def __init__(self, job):
        """
                job
                        The job object for this job
        """
        self.autodir = os.path.abspath(os.environ['AUTODIR'])
        self.setup(job)

        src = job.control_get()
        dest = os.path.join(self.autodir, 'control')
        if os.path.abspath(src) != os.path.abspath(dest):
            shutil.copyfile(src, dest)
            job.control_set(dest)

        logging.info('Symlinking init scripts')
        rc = os.path.join(self.autodir, 'tools/autotest')
        # see if system supports event.d versus inittab
        if os.path.exists('/etc/event.d'):
            # NB: assuming current runlevel is default
            initdefault = utils.system_output('/sbin/runlevel').split()[1]
        elif os.path.exists('/etc/inittab'):
            initdefault = utils.system_output(
                'grep :initdefault: /etc/inittab')
            initdefault = initdefault.split(':')[1]
        else:
            initdefault = '2'

        try:
            utils.system('ln -sf %s /etc/init.d/autotest' % rc)
            utils.system('ln -sf %s /etc/rc%s.d/S99autotest' %
                         (rc, initdefault))
        except:
            logging.warning("Linking init scripts failed")
Example #9
def unmap_url_cache(cachedir, url, expected_md5):
    """
    Downloads a file from a URL to a cache directory. If the file is already
    at the expected location and has the expected md5 sum, it is not
    downloaded again.
    """
    # Let's convert cachedir to a canonical path, if it's not already
    cachedir = os.path.realpath(cachedir)
    if not os.path.isdir(cachedir):
        try:
            utils.system("mkdir -p " + cachedir)
        except:
            raise ValueError("Could not create cache directory %s" % cachedir)
    file_from_url = os.path.basename(url)
    file_local_path = os.path.join(cachedir, file_from_url)
    if os.path.isfile(file_local_path):
        file_md5 = get_md5sum(file_local_path)
        if file_md5 == expected_md5:
            # File is already at the expected position and ready to go
            src = file_from_url
        else:
            # Let's download the package again, it's corrupted...
            src = url
    else:
        # File is not there, let's download it
        src = url
    return utils.unmap_url(cachedir, src, cachedir)
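get_md5sum() is defined elsewhere in autotest and not shown in this listing; a minimal stand-in built on hashlib would look roughly like this (a sketch, not the autotest implementation):

import hashlib

def get_md5sum(path, chunk_size=1 << 20):
    # Hash the file in fixed-size chunks so large downloads never have to be
    # read into memory at once.
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()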
Example #10
    def cleanup(self):
        """Restore the original environment as before the call to setup().

        This method makes a best-effort attempt to restore the environment and
        logs all the errors encountered but doesn't fail.
        """
        try:
            utils.system('stop p2p')
            avahi_utils.avahi_stop()
        except:
            logging.exception('Failed to stop tested services.')

        if self._tcpdump:
            self._tcpdump.stop()

        if self.tap:
            self.tap.down()

        # Restore p2p files.
        try:
            p2p_restore_files()
        except OSError:
            logging.exception('Failed to restore the P2P backup.')

        if self._services:
            self._services.restore_services()
Example #11
File: qemu_img.py Project: ceph/autotest
    def _convert(cmd, output_fmt, img_name, output_filename,
                fmt=None, compressed="no", encrypted="no"):
        """
        Simple wrapper of 'qemu-img convert' function.

        @param cmd: qemu-img base command.
        @param output_fmt: the output format of the converted image
        @param img_name: name of the image to be converted
        @param output_filename: name of the converted output image
        @param fmt: format of the source image
        @param compressed: whether the output image is compressed
        @param encrypted: whether the output image is encrypted
        """
        cmd += " convert"
        if compressed == "yes":
            cmd += " -c"
        if encrypted == "yes":
            cmd += " -e"
        if fmt:
            cmd += " -f %s" % fmt
        cmd += " -O %s" % output_fmt
        cmd += " %s %s" % (img_name, output_filename)
        logging.info("Converting '%s' from format '%s' to '%s'", img_name, fmt,
                     output_fmt)
        utils.system(cmd)
Example #12
    def __init__(self, interface_name, seconds_for_restore_connection=None):
        # Verify whether the interface is known to the system
        utils.system('%s %s' % (self._ifconfig_command, interface_name))

        self._interface_name = interface_name
        if seconds_for_restore_connection:
            self._seconds_for_restore_connection = seconds_for_restore_connection
Example #13
    def __init__(self, job):
        """
                job
                        The job object for this job
        """
        self.autodir = os.path.abspath(os.environ['AUTODIR'])
        self.setup(job)

        src = job.control_get()
        dest = os.path.join(self.autodir, 'control')
        if os.path.abspath(src) != os.path.abspath(dest):
            shutil.copyfile(src, dest)
            job.control_set(dest)

        logging.info('Symlinking init scripts')
        rc = os.path.join(self.autodir, 'tools/autotest')
        # see if system supports event.d versus inittab
        if os.path.exists('/etc/event.d'):
            # NB: assuming current runlevel is default
            initdefault = utils.system_output('/sbin/runlevel').split()[1]
        elif os.path.exists('/etc/inittab'):
            initdefault = utils.system_output('grep :initdefault: /etc/inittab')
            initdefault = initdefault.split(':')[1]
        else:
            initdefault = '2'

        try:
            utils.system('ln -sf %s /etc/init.d/autotest' % rc)
            utils.system('ln -sf %s /etc/rc%s.d/S99autotest' % (rc,initdefault))
        except:
            logging.warning("Linking init scripts failed")
Example #14
    def setup(self, dumpdir=None):
        """Initializes avahi daemon on a new tap interface.

        @param dumpdir: Directory where the traffic on the new tap interface
                        is recorded. A value of None disables traffic dumping.
        """
        try:
            from lansim import tuntap
        except ImportError:
            logging.exception('Failed to import lansim.')
            raise error.TestError('Error importing lansim. Did you setup_dep '
                                  'and install_pkg lansim on your test?')

        # Ensure p2p and avahi aren't running.
        self._services = service_stopper.ServiceStopper(['p2p', 'avahi'])
        self._services.stop_services()

        # Backup p2p files.
        p2p_backup_files()

        # Initialize the TAP interface.
        self.tap = tuntap.TunTap(tuntap.IFF_TAP, name=self._tap_name)
        self.tap.set_addr(self._tap_ip, self._tap_mask)
        self.tap.up()

        # Enable traffic dump.
        if dumpdir is not None:
            dumpfile = os.path.join(dumpdir, 'dump-%s.pcap' % self.tap.name)
            self._tcpdump = tcpdump.Tcpdump(self.tap.name, dumpfile)

        # Re-launch avahi-daemon on the TAP interface only.
        avahi_utils.avahi_start_on_iface(self.tap.name)
        utils.system("start p2p")
Example #15
def load_module(module_name):
    # Checks if a module has already been loaded
    if module_is_loaded(module_name):
        return False

    utils.system("/sbin/modprobe " + module_name)
    return True
Example #16
def load_module(module_name):
    # Checks if a module has already been loaded
    if module_is_loaded(module_name):
        return False

    utils.system('/sbin/modprobe ' + module_name)
    return True
Example #17
    def _create(cmd,
                img_name,
                fmt,
                img_size=None,
                base_img=None,
                base_img_fmt=None,
                encrypted="no"):
        """
        Simple wrapper of 'qemu-img create'

        @param cmd: qemu-img base command.
        @param img_name: name of the image file
        @param fmt: image format
        @param img_size: image size
        @param base_img: base image, when creating a snapshot image
        @param base_img_fmt: base image format, when creating a snapshot image
        @param encrypted: indicates whether the created image is encrypted
        """
        cmd += " create"
        if encrypted == "yes":
            cmd += " -e"
        if base_img:
            cmd += " -b %s" % base_img
            if base_img_fmt:
                cmd += " -F %s" % base_img_fmt
        cmd += " -f %s" % fmt
        cmd += " %s" % img_name
        if img_size:
            cmd += " %s" % img_size
        utils.system(cmd)
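As with the convert wrapper, the command line being built can be reproduced standalone for inspection; the helper name and argument values below are illustrative, not part of autotest:

def build_create_cmd(qemu_img, img_name, fmt, img_size=None, base_img=None,
                     base_img_fmt=None, encrypted=False):
    # Mirrors the option handling of _create() above, returning the command
    # string instead of running it.
    parts = [qemu_img, "create"]
    if encrypted:
        parts.append("-e")
    if base_img:
        parts += ["-b", base_img]
        if base_img_fmt:
            parts += ["-F", base_img_fmt]
    parts += ["-f", fmt, img_name]
    if img_size:
        parts.append(str(img_size))
    return " ".join(parts)

assert build_create_cmd("qemu-img", "sn1.qcow2", "qcow2",
                        base_img="base.qcow2", base_img_fmt="qcow2") == \
    "qemu-img create -b base.qcow2 -F qcow2 -f qcow2 sn1.qcow2"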
Example #18
File: packager.py Project: yochow/autotest
def process_all_packages(pkgmgr, client_dir, upload_paths, remove=False):
    """Process a full upload of packages as a directory upload."""
    test_dir = os.path.join(client_dir, "tests")
    site_test_dir = os.path.join(client_dir, "site_tests")
    dep_dir = os.path.join(client_dir, "deps")
    prof_dir = os.path.join(client_dir, "profilers")
    # Directory where all are kept
    temp_dir = tempfile.mkdtemp()
    try:
        packages.check_diskspace(temp_dir)
    except packages.RepoDiskFull:
        print ("Temp destination for packages is full %s, aborting upload"
               % temp_dir)
        os.rmdir(temp_dir)
        sys.exit(1)

    # process tests
    tests_list = get_subdir_list('tests', client_dir)
    tests = ','.join(tests_list)

    # process site_tests
    site_tests_list = get_subdir_list('site_tests', client_dir)
    site_tests = ','.join(site_tests_list)

    # process deps
    deps_list = get_subdir_list('deps', client_dir)
    deps = ','.join(deps_list)

    # process profilers
    profilers_list = get_subdir_list('profilers', client_dir)
    profilers = ','.join(profilers_list)

    # Update md5sum
    if not remove:
        tar_packages(pkgmgr, 'profiler', profilers, prof_dir, temp_dir)
        tar_packages(pkgmgr, 'dep', deps, dep_dir, temp_dir)
        tar_packages(pkgmgr, 'test', site_tests, client_dir, temp_dir)
        tar_packages(pkgmgr, 'test', tests, client_dir, temp_dir)
        tar_packages(pkgmgr, 'client', 'autotest', client_dir, temp_dir)
        cwd = os.getcwd()
        os.chdir(temp_dir)
        client_utils.system('md5sum * > packages.checksum')
        os.chdir(cwd)
        for path in upload_paths:
            print "Uploading to: " + path
            pkgmgr.upload_pkg(temp_dir, path)
        client_utils.run('rm -rf ' + temp_dir)
    else:
        for repo_url in upload_paths:
            process_packages(pkgmgr, 'test', tests, client_dir, repo_url,
                             remove=remove)
            process_packages(pkgmgr, 'test', site_tests, client_dir, repo_url,
                             remove=remove)
            process_packages(pkgmgr, 'client', 'autotest', client_dir, repo_url,
                             remove=remove)
            process_packages(pkgmgr, 'dep', deps, dep_dir, repo_url,
                             remove=remove)
            process_packages(pkgmgr, 'profiler', profilers, prof_dir, repo_url,
                             remove=remove)
Example #19
File: rsync.py Project: yochow/autotest
    def sync(self, src, dest):
        os.chdir(self.target)
        if not os.path.isdir(dest):
            os.makedirs(dest)
        src = os.path.join(self.prefix, src)
        cmd = self.command + ' %s "%s" "%s"' % (self.exclude, src, dest)
        # print cmd + ' >> %s 2>&1' % self.tmpfile
        utils.system(cmd + ' >> %s 2>&1' % self.tmpfile)
Example #20
def runtest(job, url, tag, args, dargs):
    # Leave some autotest bread crumbs in the system logs.
    utils.system('logger "autotest runtest %s"' % url, ignore_status=True)
    common_test.runtest(job, url, tag, args, dargs, locals(), globals(),
                        job.sysinfo.log_before_each_test,
                        job.sysinfo.log_after_each_test,
                        job.sysinfo.log_before_each_iteration,
                        job.sysinfo.log_after_each_iteration)
Example #21
def publish_job(jobdir):
    cmd = RSYNC_COMMAND % (jobdir, options.dest)
    utils.system(cmd)

    # mark the jobdir as published
    fd = open(os.path.join(jobdir, PUBLISH_FLAGFILE), 'w')
    fd.close()
    print 'Published', jobdir
Example #22
def restore_scaling_governor_states(path_value_list):
    """
    Restores governor states. Inverse operation to get_scaling_governor_states.
    """
    for (path, value) in path_value_list:
        cmd = 'echo %s > %s' % (value.rstrip('\n'), path)
        # On Tegra, CPUs can be dynamically enabled/disabled. Ignore failures.
        utils.system(cmd, ignore_status=True)
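get_scaling_governor_states(), which produces the path_value_list consumed here, is not part of this listing. A minimal sketch of it, assuming the standard Linux sysfs layout rather than the actual autotest helper:

import glob

def get_scaling_governor_states():
    # Snapshot (path, current_governor) pairs so they can be restored later
    # by restore_scaling_governor_states().
    states = []
    pattern = '/sys/devices/system/cpu/cpu[0-9]*/cpufreq/scaling_governor'
    for path in glob.glob(pattern):
        with open(path) as f:
            states.append((path, f.read()))
    return states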
Example #23
def publish_job(jobdir):
    cmd = RSYNC_COMMAND % (jobdir, options.dest)
    utils.system(cmd)

    # mark the jobdir as published
    fd = open(os.path.join(jobdir, PUBLISH_FLAGFILE), 'w')
    fd.close()
    print 'Published', jobdir
Example #24
def stop_job(service_name):
    """
    Stops an upstart job.
    Fails if the stop command fails.

    @param service_name: name of the service.
    """

    utils.system('stop %s' % service_name)
Example #25
    def start_server(self):
        """
        Start the radvd server.  The server will daemonize itself and
        run in the background.
        """
        self._cleanup()
        self._write_config_file()
        utils.system('%s -p %s -C %s' %
                     (RADVD_EXECUTABLE, RADVD_PID_FILE, RADVD_CONFIG_FILE))
Example #26
    def run(self, log_dir):
        """Copies this log dir to the destination dir, then purges the source.

        @param log_dir: The destination log directory.
        """
        super(purgeable_logdir, self).run(log_dir)

        if os.path.exists(self.dir):
            utils.system("rm -rf %s/*" % (self.dir))
Example #27
def load_module(module_name, params=None):
    # Checks if a module has already been loaded
    if module_is_loaded(module_name):
        return False

    cmd = '/sbin/modprobe ' + module_name
    if params:
        cmd += ' ' + params
    utils.system(cmd)
    return True
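module_is_loaded() is not shown in this listing; one common way to implement it is to scan /proc/modules (a sketch assuming a Linux host, not necessarily the autotest version):

def module_is_loaded(module_name):
    # /proc/modules lists one loaded module per line, name first. The kernel
    # reports dashes in module names as underscores, so normalize first.
    name = module_name.replace('-', '_')
    with open('/proc/modules') as f:
        return any(line.split()[0] == name for line in f)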
Example #28
def set_scaling_governors(value):
    """
    Sets all scaling governors to the given string value.
    Sample values: 'performance', 'interactive', 'ondemand', 'powersave'.
    """
    paths = _get_cpufreq_paths('scaling_governor')
    for path in paths:
        cmd = 'echo %s > %s' % (value, path)
        logging.info('Writing scaling governor mode \'%s\' -> %s', value, path)
        # On Tegra, CPUs can be dynamically enabled/disabled. Ignore failures.
        utils.system(cmd, ignore_status=True)
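_get_cpufreq_paths() is also defined elsewhere; on a typical Linux sysfs layout it can be approximated with glob (a sketch, not the autotest helper itself):

import glob

def _get_cpufreq_paths(filename):
    # One entry per logical CPU, e.g.
    # /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
    return glob.glob('/sys/devices/system/cpu/cpu[0-9]*/cpufreq/%s' % filename)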
Example #29
def nohup(command, stdout='/dev/null', stderr='/dev/null', background=True,
          env={}):
    cmd = ' '.join(key+'='+val for key, val in env.iteritems())
    cmd += ' nohup ' + command
    cmd += ' > %s' % stdout
    if stdout == stderr:
        cmd += ' 2>&1'
    else:
        cmd += ' 2> %s' % stderr
    if background:
        cmd += ' &'
    utils.system(cmd)
Example #30
    def start(self):
        """
        Start the DHCPv6 server.  The server will daemonize itself and
        run in the background.
        """
        self._cleanup()
        self._write_config_file()
        utils.system('%s -6 -pf %s -cf %s %s' %
                     (DHCPV6_SERVER_EXECUTABLE,
                      DHCPV6_SERVER_PID_FILE,
                      DHCPV6_SERVER_CONFIG_FILE,
                      self._interface))
Example #31
    def stop(self, test):
        try:
            term_profiler = "kill -15 %d" % self.pid
            # send SIGTERM to iostat and give it a 5-sec timeout
            utils.system(term_profiler, timeout=5)
        except error.CmdError:  # probably times out
            pass
        # do a ps again to see if iostat is still there
        ps_cmd = "ps -p %d | grep iostat" % self.pid
        out = utils.system_output(ps_cmd, ignore_status=True)
        if out != '':
            kill_profiler = 'kill -9 %d' % self.pid
            utils.system(kill_profiler, ignore_status=True)
Example #32
File: iostat.py Project: Poohby/autotest
    def stop(self, test):
        try:
            term_profiler = "kill -15 %d" % self.pid
            # send SIGTERM to iostat and give it a 5-sec timeout
            utils.system(term_profiler, timeout=5)
        except error.CmdError:  # probably times out
            pass
        # do a ps again to see if iostat is still there
        ps_cmd = "ps -p %d | grep iostat" % self.pid
        out = utils.system_output(ps_cmd, ignore_status=True)
        if out != '':
            kill_profiler = 'kill -9 %d' % self.pid
            utils.system(kill_profiler, ignore_status=True)
Example #33
    def _device_release(self, cmd, device):
        if utils.system(cmd, ignore_status=True) == 0:
            return
        logging.warning("Could not release %s. Retrying..." % (device))
        # Other things (like cros-disks) may have the device open briefly,
        # so if we initially fail, try again and attempt to gather details
        # on who else is using the device.
        fuser = utils.system_output('fuser -v %s' % (device),
                                    retain_output=True)
        lsblk = utils.system_output('lsblk %s' % (device), retain_output=True)
        time.sleep(1)
        if utils.system(cmd, ignore_status=True) == 0:
            return
        raise error.TestFail('"%s" failed: %s\n%s' % (cmd, fuser, lsblk))
Example #34
def set_dirty_writeback_centisecs(time=60000):
    """
    In hundredths of a second, this is how often pdflush wakes up to write data
    to disk. The default wakes up the two (or more) active threads every five
    seconds. The ChromeOS default is 10 minutes.

    We use this to set the interval as low as 1 second so that error messages
    in system logs are flushed to disk earlier.
    """
    # Flush buffers first to make this function synchronous.
    utils.system('sync')
    if time >= 0:
        cmd = 'echo %d > %s' % (time, _DIRTY_WRITEBACK_CENTISECS)
        utils.system(cmd)
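_DIRTY_WRITEBACK_CENTISECS is a module-level constant not shown here; it presumably points at the corresponding procfs tunable. A small read-back helper, under that assumption, lets a test confirm the new value took effect:

_DIRTY_WRITEBACK_CENTISECS = '/proc/sys/vm/dirty_writeback_centisecs'  # assumed

def get_dirty_writeback_centisecs():
    # Read the current flush interval (in hundredths of a second) back from
    # the kernel, e.g. to verify set_dirty_writeback_centisecs() worked.
    with open(_DIRTY_WRITEBACK_CENTISECS) as f:
        return int(f.read())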
Example #35
def nohup(command,
          stdout='/dev/null',
          stderr='/dev/null',
          background=True,
          env={}):
    cmd = ' '.join(key + '=' + val for key, val in env.iteritems())
    cmd += ' nohup ' + command
    cmd += ' > %s' % stdout
    if stdout == stderr:
        cmd += ' 2>&1'
    else:
        cmd += ' 2> %s' % stderr
    if background:
        cmd += ' &'
    utils.system(cmd)
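To see exactly what shell line this assembles, the same string construction can be reproduced and checked without running anything; the command, log path, and environment below are illustrative:

def build_nohup_cmd(command, stdout='/dev/null', stderr='/dev/null',
                    background=True, env={}):
    # Same string assembly as nohup() above, returned instead of executed.
    cmd = ' '.join(key + '=' + val for key, val in env.items())
    cmd += ' nohup ' + command
    cmd += ' > %s' % stdout
    cmd += ' 2>&1' if stdout == stderr else ' 2> %s' % stderr
    if background:
        cmd += ' &'
    return cmd

assert build_nohup_cmd('sleep 10', stdout='/tmp/out.log', stderr='/tmp/out.log',
                       env={'LANG': 'C'}) == \
    'LANG=C nohup sleep 10 > /tmp/out.log 2>&1 &'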
Example #36
    def _create_dump_from_query(self, query):
        """Dumps result of a query into a text file.

        @param query: Query to execute.

        @return: The path to a tempfile containing the response of the query.
        """
        if not self._db:
            raise BackupError(
                "_create_dump_from_query requires a specific db.")
        parameters = {'db': self._db, 'query': query}
        parameters['user'], parameters['password'] = self._get_user_pass()
        _, parameters['filename'] = tempfile.mkstemp('autotest_db_dump')
        utils.system('set -o pipefail; mysql -u %(user)s -p%(password)s '
                     '%(db)s -e "%(query)s" > %(filename)s' % parameters)
        return parameters['filename']
Example #37
    def create_mysql_dump(self):
        """Returns the path to a mysql dump of the current autotest DB."""
        user, password = self._get_user_pass()
        _, filename = tempfile.mkstemp('autotest_db_dump')
        logging.debug('Dumping mysql database to file %s', filename)
        extra_dump_args = ''
        for entry in IGNORE_TABLES:
            extra_dump_args += '--ignore-table=%s ' % entry

        if not self._db:
            extra_dump_args += "--all-databases"
        db_name = self._db or ''
        utils.system('set -o pipefail; mysqldump --user=%s '
                     '--password=%s %s %s| gzip - > %s' %
                     (user, password, extra_dump_args, db_name, filename))
        return filename
Example #38
def stop_and_wait_for_chrome_to_exit(timeout_secs=40):
    """Stops the UI and waits for chrome to exit.

    Stops the UI and waits for all chrome processes to exit or until
    timeout_secs is reached.

    Args:
        timeout_secs: float number of seconds to wait.

    Returns:
        True upon successfully stopping the UI and all chrome processes exiting.
        False otherwise.
    """
    status = stop(allow_fail=True)
    if status:
        logging.error('stop ui returned non-zero status: %s', status)
        return False
    start_time = time.time()
    while time.time() - start_time < timeout_secs:
        status = utils.system('pgrep chrome', ignore_status=True)
        if status == 1: return True
        time.sleep(1)
    logging.error('stop ui failed to stop chrome within %s seconds',
                  timeout_secs)
    return False
Example #39
def process_is_alive(name):
    """
    'pgrep name' misses all python processes and also long process names.
    'pgrep -f name' gets all shell commands with name in args.
    So look only for command whose first nonblank word ends with name.
    """
    return utils.system("pgrep -f '^[^ ]*%s\W'" % name, ignore_status=True) == 0
Example #40
def process_is_alive(name_pattern):
    """
    'pgrep name' misses all python processes and also long process names.
    'pgrep -f name' gets all shell commands with name in args.
    So look only for command whose initial pathname ends with name.
    Name itself is an egrep pattern, so it can use | etc for variations.
    """
    return utils.system("pgrep -f '^([^ /]*/)*(%s)([ ]|$)'" % name_pattern, ignore_status=True) == 0
Example #41
    def tar_package(self, pkg_name, src_dir, dest_dir, exclude_string=None):
        '''
        Create a tar.bz2 file with the name 'pkg_name', e.g. test-blah.tar.bz2.
        Excludes the directories specified in exclude_string while tarring
        the source. Returns the tarball path.
        '''
        tarball_path = os.path.join(dest_dir, pkg_name)
        temp_path = tarball_path + '.tmp'
        cmd = "tar -cvjf %s -C %s %s " % (temp_path, src_dir, exclude_string)

        try:
            utils.system(cmd)
        except:
            os.unlink(temp_path)
            raise

        os.rename(temp_path, tarball_path)
        return tarball_path
Example #42
    def run(self, log_dir):
        """Copies this log directory to the specified directory.

        @param log_dir: The destination log directory.
        """
        if os.path.exists(self.dir):
            parent_dir = os.path.dirname(self.dir)
            utils.system("mkdir -p %s%s" % (log_dir, parent_dir))
            # Take source permissions and add ugo+r so files are accessible via
            # archive server.
            additional_exclude_str = ""
            if self.additional_exclude:
                additional_exclude_str = "--exclude=" + self.additional_exclude

            utils.system("rsync --no-perms --chmod=ugo+r -a --exclude=autoserv*"
                         " --safe-links"
                         " %s %s %s%s" % (additional_exclude_str, self.dir,
                                          log_dir, parent_dir))
Example #43
File: serial.py Project: yochow/autotest
    def run_conmux(self, cmd):
        """
        Send a command to the conmux session
        """
        if not self.conmux_attach or not os.path.exists(self.conmux_attach):
            return False
        cmd = "%s %s echo %s 2> /dev/null" % (self.conmux_attach,
                                              self.get_conmux_hostname(), cmd)
        result = utils.system(cmd, ignore_status=True)
        return result == 0
Example #44
    def run(self, log_dir):
        """Copies this log directory to the specified directory.

        @param log_dir: The destination log directory.
        """
        from_dir = os.path.realpath(self.dir)
        if os.path.exists(from_dir):
            parent_dir = os.path.dirname(from_dir)
            utils.system("mkdir -p %s%s" % (log_dir, parent_dir))

            excludes = [
                    "--exclude=%s" % self._anchored_exclude_pattern(from_dir, x)
                    for x in self._excludes]
            # Take source permissions and add ugo+r so files are accessible via
            # archive server.
            utils.system(
                    "rsync --no-perms --chmod=ugo+r -a --safe-links %s %s %s%s"
                    % (" ".join(excludes), from_dir, log_dir, parent_dir))
Example #45
    def tar_package(self, pkg_name, src_dir, dest_dir, exclude_string=None):
        '''
        Create a tar.bz2 file with the name 'pkg_name', e.g. test-blah.tar.bz2.
        Excludes the directories specified in exclude_string while tarring
        the source. Returns the tarball path.
        '''
        tarball_path = os.path.join(dest_dir, pkg_name)
        temp_path = tarball_path + '.tmp'
        cmd = "tar -cvjf %s -C %s %s " % (temp_path, src_dir, exclude_string)

        try:
            utils.system(cmd)
        except:
            os.unlink(temp_path)
            raise

        os.rename(temp_path, tarball_path)
        return tarball_path
Example #46
def process_is_alive(name_pattern):
    """
    'pgrep name' misses all python processes and also long process names.
    'pgrep -f name' gets all shell commands with name in args.
    So look only for command whose initial pathname ends with name.
    Name itself is an egrep pattern, so it can use | etc for variations.
    """
    return utils.system("pgrep -f '^([^ /]*/)*(%s)([ ]|$)'" % name_pattern,
                        ignore_status=True) == 0
Example #47
def unload_module(module_name):
    """
    Removes a module. Handles dependencies. If it is still not possible to
    remove one of the modules, an error.CmdError exception will be thrown.

    @param module_name: Name of the module we want to remove.
    """
    l_raw = utils.system_output("/sbin/lsmod").splitlines()
    lsmod = [x for x in l_raw if x.split()[0] == module_name]
    if len(lsmod) > 0:
        line_parts = lsmod[0].split()
        if len(line_parts) == 4:
            submodules = line_parts[3].split(",")
            for submodule in submodules:
                unload_module(submodule)
        utils.system("/sbin/modprobe -r %s" % module_name)
        logging.info("Module %s unloaded" % module_name)
    else:
        logging.info("Module %s is already unloaded" % module_name)
Example #48
def unload_module(module_name):
    """
    Removes a module. Handles dependencies. If it is still not possible to
    remove one of the modules, an error.CmdError exception will be thrown.

    @param module_name: Name of the module we want to remove.
    """
    l_raw = utils.system_output("/sbin/lsmod").splitlines()
    lsmod = [x for x in l_raw if x.split()[0] == module_name]
    if len(lsmod) > 0:
        line_parts = lsmod[0].split()
        if len(line_parts) == 4:
            submodules = line_parts[3].split(",")
            for submodule in submodules:
                unload_module(submodule)
        utils.system("/sbin/modprobe -r %s" % module_name)
        logging.info("Module %s unloaded" % module_name)
    else:
        logging.info("Module %s is already unloaded" % module_name)
Example #49
def ping_default_gateway():
    """Ping the default gateway."""

    network = open("/etc/sysconfig/network")
    m = re.search("GATEWAY=(\S+)", network.read())

    if m:
        gw = m.group(1)
        cmd = "ping %s -c 5 > /dev/null" % gw
        return utils.system(cmd, ignore_status=True)

    raise error.TestError("Unable to find default gateway")
Example #50
    def _check_code(self):
        """
        Verifies the file with run_pylint.py. This tool will call the static
        code checker pylint using the special autotest conventions and warn
        only on problems. If problems are found, a report will be generated.
        Some of the problems reported might be bogus, but it's always good
        to look at them.
        """
        c_cmd = 'run_pylint.py %s' % self.path
        rc = utils.system(c_cmd, ignore_status=True)
        if rc != 0:
            logging.error("Syntax issues found during '%s'", c_cmd)
Example #51
    def tar_package(self, pkg_name, src_dir, dest_dir, include_string=None,
                    exclude_string=None):
        '''
        Create a tar.bz2 file with the name 'pkg_name', e.g. test-blah.tar.bz2.

        Includes the files specified in include_string, and excludes the files
        specified on the exclude string, while tarring the source. Returns the
        destination tarball path.

        @param pkg_name: Package name.
        @param src_dir: Directory that contains the data to be packaged.
        @param dest_dir: Directory that will hold the destination tarball.
        @param include_string: Pattern that represents the files that will be
                added to the tar package.
        @param exclude_string: Pattern that represents the files that should be
                excluded from the tar package.
        '''
        tarball_path = os.path.join(dest_dir, pkg_name)
        temp_path = tarball_path + '.tmp'
        cmd_list = ['tar', '-cf', temp_path, '-C', src_dir]
        if _PBZIP2_AVAILABLE:
            cmd_list.append('--use-compress-prog=pbzip2')
        else:
            cmd_list.append('-j')
        if include_string is not None:
            cmd_list.append(include_string)
        if exclude_string is not None:
            if not "--exclude" in exclude_string:
                cmd_list.append('--exclude')
            cmd_list.append(exclude_string)

        try:
            utils.system(' '.join(cmd_list))
        except Exception:
            os.unlink(temp_path)
            raise

        os.rename(temp_path, tarball_path)
        return tarball_path
Example #52
def run_module_probe(test, params, env):
    """
    load/unload KVM modules several times.

    The test can run in two modes:

    - based on a previous 'build' test: in case KVM modules were installed by
      a 'build' test, we use the modules installed by that test.

    - based on its own params: if no previous 'build' test was run,
      we assume pre-installed KVM modules. Some parameters that work for the
      'build' test, such as 'extra_modules', can then be used.
    """

    installer_object = env.previous_installer()
    if installer_object is None:
        installer_object = installer.PreInstalledKvm()
        installer_object.set_install_params(test, params)

    logging.debug('installer object: %r', installer_object)

    mod_str = params.get("mod_list")
    if mod_str:
        mod_list = re.split("[, ]", mod_str)
        logging.debug("mod list will be: %r", mod_list)
    else:
        mod_list = installer_object.full_module_list()
        logging.debug("mod list from installer: %r", mod_list)

    # unload the modules before starting:
    installer_object._unload_modules(mod_list)

    load_count = int(params.get("load_count", 100))
    try:
        for i in range(load_count):
            try:
                installer_object.load_modules(mod_list)
            except Exception,e:
                raise error.TestFail("Failed to load modules [%r]: %s" %
                                     (installer_object.full_module_list, e))

            # unload using rmmod directly because utils.unload_module() (used by
            # installer) does too much (runs lsmod, checks for dependencies),
            # and we want to run the loop as fast as possible.
            for mod in reversed(mod_list):
                r = utils.system("rmmod %s" % (mod), ignore_status=True)
                if r != 0:
                    raise error.TestFail("Failed to unload module %s. "
                                         "exit status: %d" % (mod, r))
    finally:
        installer_object.load_modules()
Example #53
def get_public_key():
    """
    Return a valid string ssh public key for the user executing autoserv or
    autotest. If there's no DSA or RSA public key, create a DSA keypair with
    ssh-keygen and return it.
    """

    ssh_conf_path = os.path.expanduser('~/.ssh')

    dsa_public_key_path = os.path.join(ssh_conf_path, 'id_dsa.pub')
    dsa_private_key_path = os.path.join(ssh_conf_path, 'id_dsa')

    rsa_public_key_path = os.path.join(ssh_conf_path, 'id_rsa.pub')
    rsa_private_key_path = os.path.join(ssh_conf_path, 'id_rsa')

    has_dsa_keypair = os.path.isfile(dsa_public_key_path) and \
        os.path.isfile(dsa_private_key_path)
    has_rsa_keypair = os.path.isfile(rsa_public_key_path) and \
        os.path.isfile(rsa_private_key_path)

    if has_dsa_keypair:
        print 'DSA keypair found, using it'
        public_key_path = dsa_public_key_path

    elif has_rsa_keypair:
        print 'RSA keypair found, using it'
        public_key_path = rsa_public_key_path

    else:
        print 'Neither RSA nor DSA keypair found, creating DSA ssh key pair'
        utils.system('ssh-keygen -t dsa -q -N "" -f %s' % dsa_private_key_path)
        public_key_path = dsa_public_key_path

    public_key = open(public_key_path, 'r')
    public_key_str = public_key.read()
    public_key.close()

    return public_key_str
Example #54
    def execute(self):
        os.chdir(self.tmpdir)
        (p1, _) = utils.run_bg('dd if=/dev/hda3 of=/dev/null')
        time.sleep(60)
        blah = os.path.join(self.tmpdir, 'blah')
        dirty_bin = os.path.join(self.srcdir, 'dirty')
        dirty_op = os.path.join(self.tmpdir, 'dirty')
        utils.system('echo AA > ' + blah)
        p2 = subprocess.Popen(dirty_bin + ' ' + blah + ' 1 > ' + dirty_op,
                              shell=True)
        time.sleep(600)
        if p2.poll() is None:
            utils.nuke_subprocess(p1)
            utils.nuke_subprocess(p2)
            raise error.TestFail('Writes made no progress')
# Commenting out use of utils.run as there is a timeout bug
#
#       try:
#           utils.run(dirty_bin + ' ' + blah + '1 > ' + dirty_op, 900, False,
#                     None, None)
#       except:
#           utils.nuke_subprocess(p1)
#           raise error.TestFail('Writes made no progress')
        utils.nuke_subprocess(p1)
예제 #55
0
def main():
    coverage = os.path.join(root, "contrib/coverage.py")
    unittest_suite = os.path.join(root, "unittest_suite.py")

    # remove preceding coverage data
    cmd = "%s -e" % (coverage)
    utils.system_output(cmd)

    # run unittest_suite through coverage analysis
    cmd = "%s -x %s" % (coverage, unittest_suite)
    utils.system_output(cmd)

    # now walk through the directory tree grabbing the list of files
    module_strings = []
    for dirpath, dirnames, files in os.walk(root):
        if is_valid_directory(dirpath):
            for f in files:
                if is_valid_filename(f):
                    temp = os.path.join(dirpath, f)
                    module_strings.append(temp)

    # analyze files
    cmd = "%s -r -m %s" % (coverage, " ".join(module_strings))
    utils.system(cmd)
Example #56
    def tar_package(self, pkg_name, src_dir, dest_dir, exclude_string=None):
        '''
        Create a tar.bz2 file with the name 'pkg_name', e.g. test-blah.tar.bz2.
        Excludes the directories specified in exclude_string while tarring
        the source. Returns the tarball path.
        '''
        tarball_path = os.path.join(dest_dir, pkg_name)
        temp_path = tarball_path + '.tmp'
        cmd_list = ['tar', '-cf', temp_path, '-C', src_dir]
        if _PBZIP2_AVAILABLE:
            cmd_list.append('--use-compress-prog=pbzip2')
        else:
            cmd_list.append('-j')
        if exclude_string is not None:
            cmd_list.append(exclude_string)

        try:
            utils.system(' '.join(cmd_list))
        except:
            os.unlink(temp_path)
            raise

        os.rename(temp_path, tarball_path)
        return tarball_path
Example #57
    def _check_unittest(self):
        """
        Verifies whether the file in question has a unittest suite; if so,
        runs the unittest and reports any failures. This is important to
        keep our unit tests up to date.
        """
        if "unittest" not in self.basename:
            # Slice off the suffix rather than using strip(".py"), which
            # strips characters, not a suffix, and would mangle names such
            # as 'happy.py'.
            stripped_name = self.basename[:-len(".py")]
            unittest_name = stripped_name + "_unittest.py"
            unittest_path = self.path.replace(self.basename, unittest_name)
            if os.path.isfile(unittest_path):
                unittest_cmd = 'python %s' % unittest_path
                rc = utils.system(unittest_cmd, ignore_status=True)
                if rc != 0:
                    logging.error("Unittest issues found during '%s'",
                                  unittest_cmd)
Example #58
    def report(self, dest_dir=None, test=None):
        if test:
            leak_check_info = self.parse_info()
            if leak_check_info:
                test.write_perf_keyval(leak_check_info)

        if dest_dir:
            utils.system('mkdir -p %s' % dest_dir)
            utils.system('rm -rf %s/*' % dest_dir)
            utils.system('cp -rp %s/kedr_leak_check/* %s' % (self._debugfs_mount_point, dest_dir))
            logging.info('KEDR leak checker result was stored into %s' % dest_dir)
Example #59
    def report(self):
        """
        Executes all required checks; if problems are found, the possible
        corrective actions are listed.
        """
        self._check_permissions()
        if self.is_python:
            self._check_indent()
            self._check_code()
            self._check_unittest()
        if self.corrective_actions:
            for action in self.corrective_actions:
                answer = ask("Would you like to execute %s?" % action,
                             auto=self.confirm)
                if answer == "y":
                    rc = utils.system(action, ignore_status=True)
                    if rc != 0:
                        logging.error("Error executing %s" % action)
Example #60
def run_module_probe(test, params, env):
    """
    load/unload kernel modules several times.

    The test can run in two modes:

    - based on a previous 'build' test: in case kernel modules were installed
      by a 'build' test, we use the modules installed by that test.

    - based on own params: if no previous 'build' test was run,
      we assume pre-installed kernel modules.
    """
    installer_object = env.previous_installer()
    if installer_object is None:
        installer_object = base_installer.NoopInstaller('noop',
                                                        'module_probe',
                                                        test, params)
    logging.debug('installer object: %r', installer_object)

    # unload the modules before starting:
    installer_object.unload_modules()

    load_count = int(params.get("load_count", 100))
    try:
        for i in range(load_count):
            try:
                installer_object.load_modules()
            except Exception,e:
                raise error.TestFail("Failed to load modules [%r]: %s" %
                                     (installer_object.module_list, e))

            # unload using rmmod directly because utils.unload_module() (used by
            # installer) does too much (runs lsmod, checks for dependencies),
            # and we want to run the loop as fast as possible.
            for mod in reversed(installer_object.module_list):
                r = utils.system("rmmod %s" % (mod), ignore_status=True)
                if r != 0:
                    raise error.TestFail("Failed to unload module %s. "
                                         "exit status: %d" % (mod, r))
    finally:
        installer_object.load_modules()