Example #1
0
def get_mappings_post_2x():
    """
    Build version-pattern -> download URL mappings for post-2.x kernels.

    Each entry is a 4-item list: [version regex, base version template,
    flag, list of URL templates].  The URL templates contain
    ``%(major)s``/``%(full)s`` style placeholders that are substituted
    later by the caller.

    :return: list of mapping entries for post-2.x kernel versions.
    """
    kernel_base = settings.get_value('CLIENT', 'kernel_mirror', default='')
    gitweb_base = settings.get_value('CLIENT', 'kernel_gitweb', default='')
    stable_gitweb_base = settings.get_value('CLIENT', 'stable_kernel_gitweb',
                                            default='')

    def expand(base_urls, suffix):
        # List comprehension instead of map(): on python 3 map() returns an
        # iterator, which made the original "map(...) + map(...)"
        # concatenation raise TypeError.  Identical result on python 2.
        return [url + suffix for url in base_urls.split()]

    tarball = 'v%(major)s/linux-%(full)s.tar.bz2'
    rc_tarball = 'v%(major)s/testing/linux-%(full)s.tar.bz2'
    tag_snapshot = ';a=snapshot;h=refs/tags/v%(full)s;sf=tgz'
    sha1_snapshot = ';a=snapshot;h=%(full)s;sf=tgz'

    mappings_post_2x = [
        [r'^\d+\.\d+$', '', True,
         expand(kernel_base, tarball) +
         expand(gitweb_base, tag_snapshot)],
        [r'^\d+\.\d+\.\d+$', '', True,
         expand(kernel_base, tarball) +
         expand(stable_gitweb_base, tag_snapshot)],
        [r'-rc\d+$', '', True,
         expand(kernel_base, rc_tarball) +
         expand(gitweb_base, tag_snapshot)],
        [r'[a-f0-9]{7,40}$', '', True,
         expand(gitweb_base, sha1_snapshot) +
         expand(stable_gitweb_base, sha1_snapshot)],
    ]

    return mappings_post_2x
Example #2
0
    def load_profiler(self, profiler, args, dargs):
        """
        Load and initialize a profiler instance by name.

        The profiler package is (re)installed under the client profilers
        directory, its module is imported, and the profiler class (assumed
        to share the profiler's name) is instantiated and initialized.

        :param profiler: name of the profiler to load.
        :param args: positional arguments forwarded to initialize() and
            (via update_version) setup().
        :param dargs: keyword arguments forwarded likewise.
        :return: the initialized profiler instance.
        :raise profiler_manager.ProfilerNotPresentError: if the profiler
            directory does not exist even after the install attempt.
        """
        prof_dir = os.path.join(self.job.autodir, "profilers", profiler)

        try:
            self.job.install_pkg(profiler, "profiler", prof_dir)
        except error.PackageInstallError:
            # Best effort: fall back to whatever is already on disk; the
            # existence check below catches a truly missing profiler.
            pass

        if not os.path.exists(prof_dir):
            raise profiler_manager.ProfilerNotPresentError(profiler)

        profiler_module = common.setup_modules.import_module(
            profiler, "autotest.client.profilers.%s" % profiler)

        # the profiler class is expected to carry the same name as its module
        newprofiler = getattr(profiler_module, profiler)(self.job)

        newprofiler.name = profiler
        newprofiler.bindir = os.path.join(prof_dir)
        # AUTODIR environment variable wins; otherwise use configured top path
        try:
            autodir = os.path.abspath(os.environ['AUTODIR'])
        except KeyError:
            autodir = settings.get_value('COMMON', 'autotest_top_path')
        tmpdir = os.path.join(autodir, 'tmp')
        # test output may be redirected via config; default to <autodir>/tmp
        output_config = settings.get_value('COMMON', 'test_output_dir',
                                           default=tmpdir)
        newprofiler.srcdir = os.path.join(output_config,
                                          os.path.basename(newprofiler.bindir),
                                          'src')
        newprofiler.tmpdir = os.path.join(self.tmpdir, profiler)
        newprofiler.initialize(*args, **dargs)
        utils.update_version(newprofiler.srcdir, newprofiler.preserve_srcdir,
                             newprofiler.version, newprofiler.setup,
                             *args, **dargs)

        return newprofiler
Example #3
0
    def _initialize_dir_properties(self):
        """
        Initializes all the secondary self.*dir properties. Requires autodir,
        clientdir and serverdir to already be initialized.
        """
        # create some stubs for use as shortcuts
        def readonly_dir(*args):
            return self._job_directory(os.path.join(*args))

        def readwrite_dir(*args):
            # the trailing True flags the directory as writable
            return self._job_directory(os.path.join(*args), True)

        # various client-specific directories
        self._bindir = readonly_dir(self.clientdir)
        self._configdir = readonly_dir(self.clientdir, 'config')
        self._profdir = readonly_dir(self.clientdir, 'profilers')
        self._toolsdir = readonly_dir(self.clientdir, 'tools')

        # AUTODIR environment variable wins over the configured top path
        try:
            autodir = os.path.abspath(os.environ['AUTODIR'])
        except KeyError:
            autodir = settings.get_value('COMMON', 'autotest_top_path')

        tmpdir = os.path.join(autodir, 'tmp')

        # test output location may be redirected via config
        test_out_dir = settings.get_value('COMMON', 'test_output_dir',
                                          default=tmpdir)

        # server runs root the default test dir at serverdir, client runs
        # at clientdir
        if self.serverdir:
            root = self.serverdir
        else:
            root = self.clientdir

        test_dir = settings.get_value('COMMON', 'test_dir',
                                      default=os.path.join(root, 'tests'))

        self._tmpdir = readwrite_dir(test_out_dir, 'tmp')

        # special case packages for backwards compatibility
        pkg_dir = test_out_dir
        if pkg_dir == self.serverdir:
            pkg_dir = self.clientdir
        self._pkgdir = readwrite_dir(pkg_dir, 'packages')

        # Now tests are read-only modules
        self._testdir = readonly_dir(test_dir)
        self._site_testdir = readwrite_dir(test_out_dir, 'site_tests')

        # various server-specific directories
        if self.serverdir:
            # This is a NOT a flexible solution, but I feel that this does
            # not deserve a configuration option by itself. So, let's just
            # try to find conmux in the system and let the user know about it
            if os.path.isdir('/usr/share/conmux'):
                self._conmuxdir = readonly_dir('/usr/share/conmux')
            else:
                self._conmuxdir = readonly_dir(self.autodir, 'conmux')
            logging.info('conmux directory set to: %s' % self._conmuxdir)
        else:
            self._conmuxdir = None
Example #4
0
 def __init__(self, job, bindir, outputdir):
     """
     Set up the per-test directory layout and runtime attributes.

     :param job: the job object this test belongs to.
     :param bindir: directory holding the test's sources/binaries.
     :param outputdir: directory where this (tagged) test run writes its
         results, profiling data and debug logs.
     """
     self.job = job
     self.pkgmgr = job.pkgmgr
     self.autodir = job.autodir
     self.outputdir = outputdir
     # basename of outputdir carries the test name plus any tag
     self.tagged_testname = os.path.basename(self.outputdir)
     self.resultsdir = os.path.join(self.outputdir, 'results')
     os.mkdir(self.resultsdir)
     self.profdir = os.path.join(self.outputdir, 'profiling')
     os.mkdir(self.profdir)
     self.debugdir = os.path.join(self.outputdir, 'debug')
     os.mkdir(self.debugdir)
     # crash handling is only configured when running as root
     if getpass.getuser() == 'root':
         self.configure_crash_handler()
     else:
         self.crash_handling_enabled = False
     self.bindir = bindir
     # AUTODIR environment variable wins; otherwise use configured top path
     try:
         autodir = os.path.abspath(os.environ['AUTODIR'])
     except KeyError:
         autodir = settings.get_value('COMMON', 'autotest_top_path')
     tmpdir = os.path.join(autodir, 'tmp')
     # test output may be redirected via config; default to <autodir>/tmp
     output_config = settings.get_value('COMMON', 'test_output_dir',
                                        default=tmpdir)
     self.srcdir = os.path.join(output_config, os.path.basename(self.bindir),
                                'src')
     self.tmpdir = tempfile.mkdtemp("_" + self.tagged_testname,
                                    dir=job.tmpdir)
     self._keyvals = []
     self._new_keyval = False
     self.failed_constraints = []
     self.iteration = 0
     # hooks run by the harness before/after each test iteration
     self.before_iteration_hooks = []
     self.after_iteration_hooks = []
Example #5
0
    def result_notify(self, job, email_from, email_to):
        """
        Notify about the result of a job. Will always print, if email data
        is provided, will send email for it as well.

            job: job object to notify about
            email_from: send notification email upon completion from here
            email_to: send notification email upon completion to here
        """
        if job.result == True:
            subject = 'Testing PASSED: '
        else:
            subject = 'Testing FAILED: '
        subject += '%s : %s\n' % (job.name, job.id)
        text = []
        # one line per (platform, status, host); failed hosts also get the
        # per-test failure reasons appended
        for platform in job.results_platform_map:
            for status in job.results_platform_map[platform]:
                if status == 'Total':
                    continue
                for host in job.results_platform_map[platform][status]:
                    text.append('%20s %10s %10s' % (platform, status, host))
                    if status == 'Failed':
                        for test_status in job.test_status[host].fail:
                            text.append('(%s, %s) : %s' % \
                                        (host, test_status.test_name,
                                         test_status.reason))
                        text.append('')

        base_url = 'http://' + self.server

        # link to the TKO report for this job's results
        params = ('columns=test',
                  'rows=machine_group',
                  "condition=tag~'%s-%%25'" % job.id,
                  'title=Report')
        query_string = '&'.join(params)
        url = '%s/tko/compose_query.cgi?%s' % (base_url, query_string)
        text.append(url + '\n')
        url = '%s/afe/#tab_id=view_job&object_id=%s' % (base_url, job.id)
        text.append(url + '\n')

        # SMTP settings are optional; missing values fall back to defaults
        smtp_info = {}
        smtp_info['server'] = settings.get_value('SERVER', 'smtp_server',
                                                default='localhost')
        smtp_info['port'] = settings.get_value('SERVER', 'smtp_port',
                                               default='')
        smtp_info['user'] = settings.get_value('SERVER', 'smtp_user',
                                               default='')
        smtp_info['password'] = settings.get_value('SERVER', 'smtp_password',
                                                   default='')

        body = '\n'.join(text)
        print '---------------------------------------------------'
        print 'Subject: ', subject
        print body
        print '---------------------------------------------------'
        if email_from and email_to:
            print 'Sending email ...'
            utils.send_email(email_from, email_to, subject, body, smtp_info)
        print
Example #6
0
    def _initialize_dir_properties(self):
        """
        Initializes all the secondary self.*dir properties. Requires autodir,
        clientdir and serverdir to already be initialized.
        """
        def ro(*parts):
            # shortcut for a read-only job directory
            return self._job_directory(os.path.join(*parts))

        def rw(*parts):
            # shortcut for a read-write job directory
            return self._job_directory(os.path.join(*parts), True)

        # client-specific directories
        self._bindir = ro(self.clientdir)
        self._configdir = ro(self.clientdir, 'config')
        self._profdir = ro(self.clientdir, 'profilers')
        self._toolsdir = ro(self.clientdir, 'tools')

        # AUTODIR from the environment wins; otherwise fall back to config
        try:
            autodir = os.path.abspath(os.environ['AUTODIR'])
        except KeyError:
            autodir = settings.get_value('COMMON', 'autotest_top_path')

        test_out_dir = settings.get_value('COMMON', 'test_output_dir',
                                          default=os.path.join(autodir, 'tmp'))

        root = self.serverdir if self.serverdir else self.clientdir
        test_dir = settings.get_value('COMMON', 'test_dir',
                                      default=os.path.join(root, 'tests'))

        self._tmpdir = rw(test_out_dir, 'tmp')

        # special case packages for backwards compatibility
        if test_out_dir == self.serverdir:
            pkg_dir = self.clientdir
        else:
            pkg_dir = test_out_dir
        self._pkgdir = rw(pkg_dir, 'packages')

        # tests are read-only modules
        self._testdir = ro(test_dir)
        self._site_testdir = rw(test_out_dir, 'site_tests')

        # server-specific directories
        self._conmuxdir = ro(self.autodir, 'conmux') if self.serverdir else None
Example #7
0
def check_diskspace(repo, min_free=None):
    '''
    Check if the remote directory over at the pkg repo has available diskspace

    If the amount of free space is not supplied, it is taken from the global
    configuration file, section [PACKAGES], key 'minimum_free_space'. The unit
    used are in SI, that is, 1 GB = 10**9 bytes.

    @type repo: string
    @param repo: a remote package repo URL
    @type min_free: int
    @param min_free: minimum amount of free space, in GB (10**9 bytes)
    :raise error.RepoUnknownError: general repository error condition
    :raise error.RepoDiskFullError: repository does not have at least the
        requested amount of free disk space.
    '''
    if min_free is None:
        min_free = settings.get_value('PACKAGES', 'minimum_free_space',
                                      type=int, default=1)
    try:
        # df -PB 10**9 reports sizes in GB blocks; field 3 of the last
        # (POSIX-format) line is the available space
        df = repo_run_command(repo,
                              'df -PB %d . | tail -1' % 10 ** 9).stdout.split()
        free_space_gb = int(df[3])
    except Exception as e:
        # "except Exception, e" (python-2-only syntax) replaced with the
        # form that also works on python 3
        raise error.RepoUnknownError('Unknown Repo Error: %s' % e)
    # the original computed free_space_gb but never compared it against
    # min_free, so RepoDiskFullError (promised by the docstring) was never
    # raised; enforce the check here
    if free_space_gb < min_free:
        raise error.RepoDiskFullError('Not enough disk space available: '
                                      '%sg < %sg' % (free_space_gb, min_free))
Example #8
0
    def _disable_host_installation(self, host):
        """
        Ask the configured install server to disable OS installation for
        ``host``.  No-op when no install server is configured.

        :param host: host object whose installation should be disabled.
        :raise InstallServerUnavailable: when the install server cannot be
            reached within the retry window.
        """
        server_info = remote.get_install_server_info()
        if remote.install_server_is_configured():
            timeout = settings.get_value('INSTALL_SERVER',
                                         'default_install_timeout',
                                         type=int,
                                         default=3600)

            # NOTE(review): only a tenth of the configured install timeout
            # is used as the connection window (retrying every timeout/100
            # seconds) -- presumably intentional since connecting should be
            # much faster than installing; confirm against config docs.
            end_time = time.time() + (timeout / 10)
            step = int(timeout / 100)
            ServerInterface = remote.RemoteHost.INSTALL_SERVER_MAPPING[server_info['type']]
            server_interface = None
            while time.time() < end_time:
                try:
                    server_interface = ServerInterface(**server_info)
                    break
                except socket.error:
                    logging.error('Install server unavailable. Trying '
                                  'again in %s s...', step)
                    time.sleep(step)

            # still None means every connection attempt failed
            if server_interface is None:
                raise InstallServerUnavailable("%s install server at (%s) "
                                               "unavailable. Tried to "
                                               "communicate for %s s" %
                                               (server_info['type'],
                                                server_info['xmlrpc_url'],
                                                timeout / 10))

            server_interface._disable_host_installation(host)
Example #9
0
    def _load_state(self):
        """
        Resolve the job state-file location and attach it to the state
        engine, converting any leftover ``.init.state`` file first.

        :raise RuntimeError: if the loaded state contains client.steps but
            this job is not a continuation.
        """
        # Consistent with the rest of the codebase: prefer $AUTODIR but fall
        # back to the configured top path.  The original raised KeyError
        # when AUTODIR was unset, unlike every sibling that reads it.
        try:
            autodir = os.path.abspath(os.environ['AUTODIR'])
        except KeyError:
            autodir = settings.get_value('COMMON', 'autotest_top_path')
        tmpdir = os.path.join(autodir, 'tmp')
        state_config = settings.get_value('COMMON', 'test_output_dir',
                                          default=tmpdir)
        if not os.path.isdir(state_config):
            os.makedirs(state_config)
        control_base = os.path.basename(self.control)
        init_state_file = os.path.join(state_config,
                                       "%s.init.state" % control_base)
        self._state_file = os.path.join(state_config,
                                        "%s.state" % control_base)

        # a leftover init-state file from job setup seeds the real state
        if os.path.exists(init_state_file):
            shutil.move(init_state_file, self._state_file)
        self._state.set_backing_file(self._state_file)

        # initialize the state engine, if necessary
        has_steps = self._state.has('client', 'steps')
        if not self._is_continuation and has_steps:
            raise RuntimeError('Loaded state can only contain client.steps if '
                               'this is a continuation')

        if not has_steps:
            logging.debug('Initializing the state engine')
            self._state.set('client', 'steps', [])
Example #10
0
 def get_wait_up_processes(self):
     """Return the set of local process names wait_up should wait for."""
     raw = settings.get_value("HOSTS", "wait_up_processes",
                              default="").strip()
     # comma-separated config value; drop surrounding whitespace and any
     # empty entries produced by stray commas
     names = set(entry.strip() for entry in raw.split(","))
     names.discard("")
     return names
Example #11
0
    def reboot(self, tag=LAST_BOOT_TAG):
        """
        Reboot the machine into the bootloader entry tagged ``tag``.

        :param tag: bootloader entry tag to boot; the LAST_BOOT_TAG
            sentinel re-uses the tag from the previous boot.
        """
        if tag == LAST_BOOT_TAG:
            tag = self.last_boot_tag
        else:
            self.last_boot_tag = tag

        self.reboot_setup()
        self.harness.run_reboot()
        # either make the entry the permanent default or boot it just once,
        # depending on the boot.set_default job config
        default = self.config_get("boot.set_default")
        if default:
            self.bootloader.set_default(tag)
        else:
            self.bootloader.boot_once(tag)

        # HACK: using this as a module sometimes hangs shutdown, so if it's
        # installed unload it first
        utils.system("modprobe -r netconsole", ignore_status=True)

        # sync first, so that a sync during shutdown doesn't time out
        utils.system("sync; sync", ignore_status=True)

        # delay the actual reboot (detached from this process) so this
        # process gets a chance to exit cleanly first
        sleep_before_reboot = settings.get_value("CLIENT", "sleep_before_reboot", default="5")

        sleep_cmd = "(sleep %s; reboot) </dev/null >/dev/null 2>&1 &" % sleep_before_reboot
        utils.system(sleep_cmd)

        self.quit()
Example #12
0
    def __init__(self, path, user, server, print_log, debug, reply_debug):
        """
        Create a cached instance of a connection to the frontend

            path: RPC endpoint path appended to the server URL
            user: username to connect as (defaults to the local user)
            server: frontend server to connect to (defaults to the
                AUTOTEST_WEB environment variable, then the SERVER/hostname
                config value)
            print_log: print a logging message to stdout on every operation
            debug: print out all RPC traffic
            reply_debug: flag stored for RPC reply debugging
        """
        if not user:
            user = getpass.getuser()
        if not server:
            if 'AUTOTEST_WEB' in os.environ:
                server = os.environ['AUTOTEST_WEB']
            else:
                server = settings.get_value('SERVER', 'hostname',
                                            default=DEFAULT_SERVER)
        self.server = server
        self.user = user
        self.print_log = print_log
        self.debug = debug
        self.reply_debug = reply_debug
        http_server = 'http://' + server
        headers = rpc_client_lib.authorization_headers(user, http_server)
        rpc_server = http_server + path
        if debug:
            print 'SERVER: %s' % rpc_server
            print 'HEADERS: %s' % headers
        self.proxy = rpc_client_lib.get_proxy(rpc_server, headers=headers)
Example #13
0
    def collect_stalled_info(self):
        """
        Collect diagnostic command output into ``<log_path>.stall_info``.

        Runs a fixed set of system inspection commands (plus a mysql
        processlist dump when BACKUP credentials are configured) and writes
        each command's banner output to the stall-info log file.
        """
        commands = ["uptime", "ps auxwww", "iostat -k -x 2 4"]
        db_cmd = "/usr/bin/mysqladmin --verbose processlist -u%s -p%s"
        try:
            user = settings.get_value("BACKUP", "user")
            password = settings.get_value("BACKUP", "password")
            commands.append(db_cmd % (user, password))
        except SettingsError:
            # BACKUP credentials are optional; skip the db dump if absent
            pass
        stall_log_path = self.log_path + ".stall_info"
        # with-statement guarantees the log file is closed even if one of
        # the commands raises (the original leaked the handle on error)
        with open(stall_log_path, "w") as log:
            for cmd in commands:
                log.write(run_banner_output(cmd))
Example #14
0
 def _get_option(self, name, provided_value):
     """
     Resolve an option value: an explicitly provided value wins, then the
     global config section (when one is configured), then the attribute of
     the same name (defaulting to None).
     """
     if provided_value is not None:
         return provided_value
     if not self.settings_section:
         return getattr(self, name, None)
     settings_name = _GLOBAL_CONFIG_NAMES.get(name, name)
     return settings.get_value(self.settings_section, settings_name)
Example #15
0
def get_server_status():
    """
    Get autotest server system information.

    :return: Dict with keys:
             * 'used_space_logs' Autotest log directory disk usage (percent)
             * 'scheduler_running' Whether the autotest scheduler is running
             * 'scheduler_watcher_running' Whether the scheduler watcher is
                running
             * 'install_server_running' Whether install server profiles are
                available
             * 'concerns' Global evaluation of whether there are problems to
                be addressed
    """
    server_status = {}
    concerns = False
    # NOTE: 'treshold' is a historical typo in the config key name and must
    # be kept so existing configuration files keep working
    disk_threshold = int(settings.get_value('SERVER',
                                            'logs_disk_usage_treshold',
                                            default="80"))
    used_space_logs = _get_logs_used_space()
    if used_space_logs > disk_threshold:
        concerns = True
    server_status['used_space_logs'] = used_space_logs

    scheduler_running = _process_running('autotest-scheduler')
    if not scheduler_running:
        concerns = True
    server_status['scheduler_running'] = scheduler_running

    watcher_running = _process_running('autotest-scheduler-watcher')
    if not watcher_running:
        concerns = True
    server_status['scheduler_watcher_running'] = watcher_running

    install_server_running = get_install_server_profiles() is not None
    if not install_server_running:
        concerns = True
    server_status['install_server_running'] = install_server_running

    server_status['concerns'] = concerns
    return server_status
Example #16
0
 def _init_packages(self):
     """
     Perform the packages support initialization.
     """
     # the package manager operates out of the configured test output dir.
     # NOTE(review): unlike other call sites the fallback here is
     # self.autodir itself rather than <autodir>/tmp -- confirm intended.
     tmpdir = settings.get_value('COMMON', 'test_output_dir',
                                 default=self.autodir)
     self.pkgmgr = packages.PackageManager(
         tmpdir, run_function_dargs={'timeout':3600})
Example #17
0
    def __init__(self, module="scheduler"):
        """
        Initialize an email notification manager.

        :param module: name of the module this manager is handling,
            e.g. 'scheduler'.
        """
        self.module = module
        self.email_queue = []

        # notification behavior from the NOTIFICATION config section
        self.html_email = settings.get_value("NOTIFICATION",
                                             "html_notify_email",
                                             type=bool, default=False)
        self.from_email = settings.get_value("NOTIFICATION",
                                             "notify_email_from",
                                             default=DEFAULT_FROM_EMAIL)
        self.grid_admin_email = settings.get_value("NOTIFICATION",
                                                   "grid_admin_email",
                                                   default='')

        # SMTP connection parameters from the EMAIL config section, grouped
        # the way the mail-sending helpers expect them
        self.smtp_info = {
            'server': settings.get_value("EMAIL", "smtp_server",
                                         default='localhost'),
            'port': settings.get_value("EMAIL", "smtp_port", default=None),
            'user': settings.get_value("EMAIL", "smtp_user", default=None),
            'password': settings.get_value("EMAIL", "smtp_password",
                                           default=None),
        }
Example #18
0
    def _load_config(self, host, database, user, password):
        """
        Populate connection settings, preferring explicit arguments over
        the AUTOTEST_WEB section of the global config.

        Note that an empty-string password counts as "provided" (only None
        triggers the config lookup), while empty host/database/user fall
        back to the config.
        """
        get = settings.get_value

        # connection target
        self.host = host or get("AUTOTEST_WEB", "host")
        self.database = database or get("AUTOTEST_WEB", "database")

        # credentials
        self.user = user or get("AUTOTEST_WEB", "user")
        if password is None:
            password = get("AUTOTEST_WEB", "password")
        self.password = password

        # timeout / retry configuration
        self.query_timeout = get("AUTOTEST_WEB", "query_timeout",
                                 type=int, default=3600)
        self.min_delay = get("AUTOTEST_WEB", "min_retry_delay",
                             type=int, default=20)
        self.max_delay = get("AUTOTEST_WEB", "max_retry_delay",
                             type=int, default=60)
def main(args):
    """
    Print the value of each SECTION.variable entry given on the command
    line; print usage help when no entries are supplied or an entry is
    malformed.

    args: argv-style list; entries are taken from args[1:].
    """
    if len(args) <= 1:
        usage()

    entries = args[1:]

    for entry in entries:
        # each entry must be exactly one SECTION.variable pair
        try:
            section, var = entry.split('.')
        except ValueError:
            print "Invalid SECTION.varable supplied: " + entry
            usage()

        try:
            print settings.get_value(section, var)
        except SettingsError:
            print "Error reading %s.%s" % (section, var)
def configure_logging():
    """
    Set up the module-global ``rpc_logger`` with a size-rotated file
    handler.

    The log path, maximum size (in MB, scaled by the module-level MEGABYTE
    constant) and the number of rotated files kept are read from the SERVER
    section of the global config.
    """
    MAX_LOG_SIZE = settings.get_value('SERVER', 'rpc_max_log_size_mb', type=int)
    NUMBER_OF_OLD_LOGS = settings.get_value('SERVER', 'rpc_num_old_logs',
                                            type=int)
    log_path = settings.get_value('SERVER', 'rpc_log_path')

    formatter = logging.Formatter(
        fmt='[%(asctime)s %(levelname)-5.5s] %(message)s',
        datefmt='%m/%d %H:%M:%S')
    handler = logging.handlers.RotatingFileHandler(log_path,
                                                   maxBytes=MAX_LOG_SIZE * MEGABYTE,
                                                   backupCount=NUMBER_OF_OLD_LOGS)
    handler.setFormatter(formatter)

    global rpc_logger
    rpc_logger = logging.getLogger('rpc_logger')
    rpc_logger.addHandler(handler)
    # keep RPC traffic out of the root logger; record everything at DEBUG+
    rpc_logger.propagate = False
    rpc_logger.setLevel(logging.DEBUG)
Example #21
0
 def _init_drop_caches(self, drop_caches):
     """
     Perform the drop caches initialization.

     :param drop_caches: whether to drop OS caches immediately; the
         between-iterations behavior is read from the CLIENT config
         section instead.
     """
     self.drop_caches_between_iterations = (settings.get_value('CLIENT',
                                            'drop_caches_between_iterations',
                                            type=bool, default=True))
     self.drop_caches = drop_caches
     if self.drop_caches:
         utils.drop_caches()
Example #22
0
    def _get_max_pidfile_refreshes(self):
        """
        Normally refresh() is called on every monitor_db.Dispatcher.tick().

        @returns: The number of refresh() calls before we forget a pidfile,
            read from the scheduler config section (default 2000).
        """
        return settings.get_value(scheduler_config.CONFIG_SECTION,
                                  'max_pidfile_refreshes',
                                  type=int, default=2000)
Example #23
0
 def _init_drop_caches(self, drop_caches):
     """
     Configure cache dropping between iterations and, when requested,
     drop the OS caches right away.
     """
     between_iterations = settings.get_value(
         "CLIENT", "drop_caches_between_iterations", type=bool, default=True
     )
     self.drop_caches_between_iterations = between_iterations
     self.drop_caches = drop_caches
     if drop_caches:
         utils_memory.drop_caches()
Example #24
0
def _get_logs_used_space():
    """
    Return disk usage (percentage) for the results directory.
    """
    # default location is <autotest root>/results, two levels above here
    autotest_root = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "..", ".."))
    logs_dir = settings.get_value("COMMON", "test_output_dir", default=None)
    if logs_dir is None:
        logs_dir = os.path.join(autotest_root, "results")
    return int(psutil.disk_usage(logs_dir).percent)
    def archive_results(self, path):
        """
        Copy the results under ``path`` to the configured results host.

        No-op when no results host is configured or it is localhost.

        :param path: local results directory; mirrored to the same path on
            the results host.
        """
        results_host = settings.get_value('SCHEDULER', 'results_host',
                                          default=None)
        if not results_host or results_host == 'localhost':
            return

        # rsync-style semantics: a trailing slash copies directory contents
        if not path.endswith('/'):
            path += '/'

        logging.info('Archiving %s to %s', path, results_host)
        utility = drone_utility.DroneUtility()
        utility.sync_send_file_to(results_host, path, path, can_fail=True)
Example #26
0
def get_mappings_2x():
    """
    Build version-pattern -> download URL mappings for 2.x kernels.

    Each entry is a 4-item list: [version regex, base version template,
    flag, list of URL templates].  The URL templates contain
    ``%(major)s``/``%(minor)s``/``%(base)s``/``%(full)s`` style
    placeholders that are substituted later by the caller.

    :return: list of mapping entries for 2.x kernel versions.
    """
    kernel_base = settings.get_value('CLIENT', 'kernel_mirror', default='')
    gitweb_base = settings.get_value('CLIENT', 'kernel_gitweb', default='')
    stable_gitweb_base = settings.get_value('CLIENT', 'stable_kernel_gitweb',
                                            default='')

    def expand(base_urls, suffix):
        # List comprehension instead of map(): on python 3 map() returns an
        # iterator, which made the original "map(...) + map(...)"
        # concatenation raise TypeError.  Identical result on python 2.
        return [url + suffix for url in base_urls.split()]

    tarball = 'v%(major)s/linux-%(full)s.tar.bz2'
    tag_snapshot = ';a=snapshot;h=refs/tags/v%(full)s;sf=tgz'
    sha1_snapshot = ';a=snapshot;h=%(full)s;sf=tgz'

    mappings_2x = [
        [r'^\d+\.\d+$', '', True,
         expand(kernel_base, tarball) +
         expand(gitweb_base, tag_snapshot)],
        [r'^\d+\.\d+\.\d+$', '', True,
         expand(kernel_base, tarball) +
         expand(gitweb_base, tag_snapshot)],
        [r'^\d+\.\d+\.\d+\.\d+$', '', True,
         expand(kernel_base, tarball) +
         expand(stable_gitweb_base, tag_snapshot)],
        [r'-rc\d+$', '%(minor-prev)s', True,
         expand(kernel_base, 'v%(major)s/testing/v%(minor)s/linux-%(full)s.tar.bz2') +
         expand(kernel_base, 'v%(major)s/testing/linux-%(full)s.tar.bz2') +
         expand(gitweb_base, tag_snapshot)],
        [r'-(git|bk)\d+$', '%(base)s', False,
         expand(kernel_base, 'v%(major)s/snapshots/old/patch-%(full)s.bz2') +
         expand(kernel_base, 'v%(major)s/snapshots/patch-%(full)s.bz2')],
        [r'-mm\d+$', '%(base)s', False,
         expand(kernel_base,
                'people/akpm/patches/%(major)s/%(base)s/%(full)s/%(full)s.bz2')],
        [r'-mjb\d+$', '%(base)s', False,
         expand(kernel_base, 'people/mbligh/%(base)s/patch-%(full)s.bz2')],
        [r'[a-f0-9]{7,40}$', '', True,
         expand(gitweb_base, sha1_snapshot) +
         expand(stable_gitweb_base, sha1_snapshot)],
    ]

    return mappings_2x
Example #27
0
    def refresh_drone_configs(self):
        """
        Reread global config options for all drones.
        """
        section = scheduler_config.CONFIG_SECTION
        # pick up any on-disk config changes before querying values
        settings.parse_config_file()
        for hostname, drone in self._drones.iteritems():
            # any non-empty value for <hostname>_disabled disables the drone
            disabled = settings.get_value(section, '%s_disabled' % hostname,
                                          default='')
            drone.enabled = not bool(disabled)

            drone.max_processes = settings.get_value(
                    section, '%s_max_processes' % hostname, type=int,
                    default=scheduler_config.config.max_processes_per_drone)

            # None means "no restriction"; otherwise a whitespace-separated
            # list of allowed user names
            allowed_users = settings.get_value(section, '%s_users' % hostname,
                                               default=None)
            if allowed_users is not None:
                allowed_users = set(allowed_users.split())
            drone.allowed_users = allowed_users

        self._reorder_drone_queue() # max_processes may have changed
Example #28
0
def get_autotest_server(web_server=None):
    """
    Resolve the autotest web server URL.

    Resolution order: explicit argument, then the AUTOTEST_WEB environment
    variable, then the SERVER/hostname global-config value.  The result is
    always normalized to carry an http:// scheme.
    """
    if not web_server:
        web_server = os.environ.get("AUTOTEST_WEB")
        if web_server is None:
            hostname = settings.get_value("SERVER", "hostname",
                                          default=DEFAULT_SERVER)
            web_server = "http://" + hostname

    # if the name doesn't start with http://,
    # nonexistant hosts get an obscure error
    if not web_server.startswith("http://"):
        web_server = "http://" + web_server

    return web_server
Example #29
0
def _get_logs_used_space():
    """
    (Internal) Return disk usage (percentage) for the results directory.

    :return: Usage in percents (integer value).
    """
    logs_dir = settings.get_value('COMMON', 'test_output_dir', default=None)
    # default location is <autotest root>/results, two levels above here
    autodir = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                           '..', '..'))
    if logs_dir is None:
        logs_dir = os.path.join(autodir, 'results')
    usage = psutil.disk_usage(logs_dir)
    return int(usage.percent)
Example #30
0
    def _send_tarball(self, pkg_name, remote_dest):
        """
        Bundle the sources for a package into a tarball and send it to the
        remote host.

        :param pkg_name: tarball name, parseable by pkgmgr.parse_tarball_name.
        :param remote_dest: destination path on the remote host.
        """
        name, pkg_type = self.job.pkgmgr.parse_tarball_name(pkg_name)
        src_dirs = []
        if pkg_type == 'test':
            test_dirs = ['site_tests', 'tests']
            # if test_dir is defined in global config
            # package the tests from there (if exists)
            settings_test_dirs = settings.get_value('COMMON', 'test_dir',
                                                    default="")
            if settings_test_dirs:
                test_dirs = settings_test_dirs.strip().split(',') + test_dirs
            for test_dir in test_dirs:
                src_dir = os.path.join(self.job.clientdir, test_dir, name)
                if os.path.exists(src_dir):
                    src_dirs += [src_dir]
                    if autoserv_prebuild:
                        prebuild.setup(self.job.clientdir, src_dir)
                    break
        elif pkg_type == 'profiler':
            # BUG FIX: the original called prebuild.setup() with src_dir,
            # which is never assigned in this branch (NameError / stale
            # value); use the profiler path that is actually packaged.
            src_dir = os.path.join(self.job.clientdir, 'profilers', name)
            src_dirs += [src_dir]
            if autoserv_prebuild:
                prebuild.setup(self.job.clientdir, src_dir)
        elif pkg_type == 'dep':
            src_dirs += [os.path.join(self.job.clientdir, 'deps', name)]
        elif pkg_type == 'client':
            return  # you must already have a client to hit this anyway
        else:
            return  # no other types are supported

        # iterate over src_dirs until we find one that exists, then tar it
        for src_dir in src_dirs:
            if os.path.exists(src_dir):
                logging.info('Bundling %s into %s', src_dir, pkg_name)
                # create the temp dir outside the try so a failure here
                # cannot cause a NameError on temp_dir in the finally clause
                temp_dir = autotemp.tempdir(unique_id='autoserv-packager',
                                            dir=self.job.tmpdir)
                try:
                    exclude_paths = None
                    exclude_file_path = os.path.join(src_dir, ".pack_exclude")
                    if os.path.exists(exclude_file_path):
                        # with-statement closes the exclude list even if
                        # reading raises
                        with open(exclude_file_path) as exclude_file:
                            exclude_paths = exclude_file.read().splitlines()

                    tarball_path = self.job.pkgmgr.tar_package(
                        pkg_name, src_dir, temp_dir.name,
                        " .", exclude_paths)
                    self.host.send_file(tarball_path, remote_dest)
                finally:
                    temp_dir.clean()
                return
Example #31
0
class BaseAutotest(installable_object.InstallableObject):

    """
    This class represents the Autotest program.

    Autotest is used to run tests automatically and collect the results.
    It also supports profilers.

    Implementation details:
    This is a leaf class in an abstract class hierarchy, it must
    implement the unimplemented methods in parent classes.
    """

    def __init__(self, host=None):
        self.host = host
        self.got = False
        self.installed = False
        self.serverdir = utils.get_server_dir()
        self.os_vendor = client_utils.get_os_vendor()
        self.server_system_wide_install = _server_system_wide_install()
        super(BaseAutotest, self).__init__()

    install_in_tmpdir = False

    @classmethod
    def set_install_in_tmpdir(cls, flag):
        """
        Sets a flag that controls whether or not Autotest should by
        default be installed in a "standard" directory (e.g. /home/autotest) or
        a temporary directory.
        """
        cls.install_in_tmpdir = flag

    @classmethod
    def get_client_autodir_paths(cls, host):
        return settings.get_value('AUTOSERV', 'client_autodir_paths', type=list)

    @classmethod
    def get_installed_autodir(cls, host):
        """
        Find where the Autotest client is installed on the host.
        :return: an absolute path to an installed Autotest client root.
        :raise AutodirNotFoundError if no Autotest installation can be found.
        """
        autodir = host.get_autodir()
        if autodir:
            logging.debug('Using existing host autodir: %s', autodir)
            return autodir

        if not _server_system_wide_install():
            for path in Autotest.get_client_autodir_paths(host):
                try:
                    autotest_binary = os.path.join(path, CLIENT_BINARY)
                    host.run('test -x %s' % utils.sh_escape(autotest_binary))
                    host.run('test -w %s' % utils.sh_escape(path))
                    logging.debug('Found existing autodir at %s', path)
                    return path
                except error.AutoservRunError:
                    logging.debug('%s does not exist on %s', autotest_binary,
                                  host.hostname)
        else:
            for path in Autotest.get_client_autodir_paths(host):
                host.run('test -w %s' % utils.sh_escape(path))
                logging.debug('Found existing autodir at %s', path)
                host.autodir = path
                return path

        raise AutodirNotFoundError

    @classmethod
    def get_install_dir(cls, host):
        """
        Determines the location where autotest should be installed on
        host. If self.install_in_tmpdir is set, it will return a unique
        temporary directory that autotest can be installed in. Otherwise, looks
        for an existing installation to use; if none is found, looks for a
        usable directory in the global config client_autodir_paths.
        """
        try:
            install_dir = cls.get_installed_autodir(host)
        except AutodirNotFoundError:
            install_dir = cls._find_installable_dir(host)

        if cls.install_in_tmpdir:
            return host.get_tmp_dir(parent=install_dir)
        return install_dir

    @classmethod
    def _find_installable_dir(cls, host):
        client_autodir_paths = cls.get_client_autodir_paths(host)
        for path in client_autodir_paths:
            try:
                host.run('mkdir -p %s' % utils.sh_escape(path))
                host.run('test -w %s' % utils.sh_escape(path))
                return path
            except error.AutoservRunError:
                logging.debug('Failed to create %s', path)
        raise error.AutoservInstallError(
            'Unable to find a place to install Autotest; tried %s' %
            ', '.join(client_autodir_paths))

    def _create_test_output_dir(self, host, autodir):
        tmpdir = os.path.join(autodir, 'tmp')
        state_autodir = settings.get_value('COMMON', 'test_output_dir',
                                           default=tmpdir)
        host.run('mkdir -p %s' % utils.sh_escape(state_autodir))

    def get_fetch_location(self):
        repos = settings.get_value("PACKAGES", 'fetch_location', type=list,
                                   default=[])
        repos.reverse()
        return repos

    def install(self, host=None, autodir=None):
        self._install(host=host, autodir=autodir)

    def install_full_client(self, host=None, autodir=None):
        self._install(host=host, autodir=autodir, use_autoserv=False,
                      use_packaging=False)

    def install_no_autoserv(self, host=None, autodir=None):
        self._install(host=host, autodir=autodir, use_autoserv=False)

    def _install_using_packaging(self, host, autodir):
        repos = self.get_fetch_location()
        if not repos:
            raise error.PackageInstallError("No repos to install an "
                                            "autotest client from")
        pkgmgr = packages.PackageManager(autodir, hostname=host.hostname,
                                         repo_urls=repos,
                                         do_locking=False,
                                         run_function=host.run,
                                         run_function_dargs=dict(timeout=600))
        # The packages dir is used to store all the packages that
        # are fetched on that client. (for the tests,deps etc.
        # too apart from the client)
        pkg_dir = os.path.join(autodir, 'packages')
        # clean up the autodir except for the packages directory
        host.run('cd %s && ls | grep -v "^packages$"'
                 ' | xargs rm -rf && rm -rf .[^.]*' % autodir)
        pkgmgr.install_pkg('autotest', 'client', pkg_dir, autodir,
                           preserve_install_dir=True)
        self._create_test_output_dir(host, autodir)
        logging.info("Installation of autotest completed")
        self.installed = True

    def _install_using_send_file(self, host, autodir):
        dirs_to_exclude = set(["tests", "site_tests", "deps", "profilers"])
        light_files = [os.path.join(self.source_material, f)
                       for f in os.listdir(self.source_material)
                       if f not in dirs_to_exclude]

        # there should be one and only one grubby tarball
        grubby_glob = os.path.join(self.source_material,
                                   "deps/grubby/grubby-*.tar.bz2")
        grubby_tarball_paths = glob.glob(grubby_glob)
        if grubby_tarball_paths:
            grubby_tarball_path = grubby_tarball_paths[0]
            if os.path.exists(grubby_tarball_path):
                light_files.append(grubby_tarball_path)

        host.send_file(light_files, autodir, delete_dest=True)

        profilers_autodir = os.path.join(autodir, 'profilers')
        profilers_init = os.path.join(self.source_material, 'profilers',
                                      '__init__.py')
        host.run("mkdir -p %s" % profilers_autodir)
        host.send_file(profilers_init, profilers_autodir, delete_dest=True)
        dirs_to_exclude.discard("profilers")

        # create empty dirs for all the stuff we excluded
        commands = []
        for path in dirs_to_exclude:
            abs_path = os.path.join(autodir, path)
            abs_path = utils.sh_escape(abs_path)
            commands.append("mkdir -p '%s'" % abs_path)
            commands.append("touch '%s'/__init__.py" % abs_path)
        host.run(';'.join(commands))

    def _install(self, host=None, autodir=None, use_autoserv=True,
                 use_packaging=True):
        """
        Install autotest.

        :param host A Host instance on which autotest will be installed
        :param autodir Location on the remote host to install to
        :param use_autoserv Enable install modes that depend on the client
            running with the autoserv harness
        :param use_packaging Enable install modes that use the packaging system

        @exception AutoservError If it wasn't possible to install the client
                after trying all available methods
        """
        if not host:
            host = self.host
        if not self.got:
            self.get()
        host.wait_up(timeout=30)
        host.setup()
        logging.info("Installing autotest on %s", host.hostname)

        if self.server_system_wide_install:
            msg_install = ("Autotest seems to be installed in the "
                           "client on a system wide location, proceeding...")

            logging.info("Verifying client package install")
            if _client_system_wide_install(host):
                logging.info(msg_install)
                self.installed = True
                return

            install_cmd = INSTALL_CLIENT_CMD_MAPPING.get(self.os_vendor, None)
            if install_cmd is not None:
                logging.info(msg_install)
                host.run(install_cmd)
                if _client_system_wide_install(host):
                    logging.info("Autotest seems to be installed in the "
                                 "client on a system wide location, proceeding...")
                    self.installed = True
                    return

            raise error.AutoservError("The autotest client package "
                                      "does not seem to be installed "
                                      "on %s" % host.hostname)

        # set up the autotest directory on the remote machine
        if not autodir:
            autodir = self.get_install_dir(host)
        logging.info('Using installation dir %s', autodir)
        host.set_autodir(autodir)
        host.run('mkdir -p %s' % utils.sh_escape(autodir))

        # make sure there are no files in $AUTODIR/results
        results_path = os.path.join(autodir, 'results')
        host.run('rm -rf %s/*' % utils.sh_escape(results_path),
                 ignore_status=True)

        # Fetch the autotest client from the nearest repository
        if use_packaging:
            try:
                self._install_using_packaging(host, autodir)
                self._create_test_output_dir(host, autodir)
                logging.info("Installation of autotest completed")
                self.installed = True
                return
            except (error.PackageInstallError, error.AutoservRunError,
                    SettingsError), e:
                logging.info("Could not install autotest using the packaging "
                             "system: %s. Trying other methods", e)

        # try to install from file or directory
        if self.source_material:
            supports_autoserv_packaging = settings.get_value("PACKAGES",
                                                             "serve_packages_from_autoserv",
                                                             type=bool)
            # Copy autotest recursively
            if supports_autoserv_packaging and use_autoserv:
                self._install_using_send_file(host, autodir)
            else:
                host.send_file(self.source_material, autodir, delete_dest=True)
            self._create_test_output_dir(host, autodir)
            logging.info("Installation of autotest completed")
            self.installed = True
            return

        raise error.AutoservError('Could not install autotest on '
                                  'target machine: %s' % host.name)
Example #32
0
    def __init__(self, job, harness_args):
        """
        Set up the standalone harness: relocate the control file to the
        test output dir, work out the system's default runlevel and install
        the autotest init script so the client resumes across reboots.

        :param job: the job object for this job.
        :param harness_args: extra harness arguments (not used here).
        """
        self.autodir = os.path.abspath(os.environ['AUTODIR'])
        self.setup(job)

        tmpdir = os.path.join(self.autodir, 'tmp')
        tests_dir = settings.get_value('COMMON',
                                       'test_output_dir',
                                       default=tmpdir)

        # keep the control file inside the test output dir so it survives
        # autodir cleanups
        src = job.control_get()
        dest = os.path.join(tests_dir, 'control')
        if os.path.abspath(src) != os.path.abspath(dest):
            shutil.copyfile(src, dest)
            job.control_set(dest)

        def yield_default_initlevel():
            """
            If we really can't figure out something better, default to '2',
            which is the case with some debian systems
            """
            init_default = '2'
            logging.error('Could not determine initlevel, assuming %s' %
                          init_default)
            return init_default

        rc = os.path.join(self.autodir, 'tools/autotest')
        # see if system supports event.d versus systemd versus inittab
        supports_eventd = os.path.exists('/etc/event.d')
        supports_systemd = os.path.exists('/etc/systemd')
        supports_inittab = os.path.exists('/etc/inittab')
        # This is the best heuristics I can think of for identifying
        # an embedded system running busybox.
        # bugfix: os.readlink() raises OSError when /bin/sh is a regular
        # file (most non-busybox systems), so check islink() first.
        busybox_system = (os.path.islink('/bin/sh') and
                          os.readlink('/bin/sh') == 'busybox')

        # Small busybox systems usually use /etc/rc.d/ straight
        if busybox_system:
            initdefault = ''

        elif supports_eventd or supports_systemd:
            try:
                # NB: assuming current runlevel is default
                cmd_result = utils.run('/sbin/runlevel', verbose=False)
                initdefault = cmd_result.stdout.split()[1]
            except (error.CmdError, IndexError):
                initdefault = yield_default_initlevel()

        elif supports_inittab:
            try:
                cmd_result = utils.run('grep :initdefault: /etc/inittab',
                                       verbose=False)
                initdefault = cmd_result.stdout.split(':')[1]
            except (error.CmdError, IndexError):
                initdefault = yield_default_initlevel()

        else:
            initdefault = yield_default_initlevel()

        vendor = distro.detect().name
        service = '/etc/init.d/autotest'
        if vendor == 'SUSE':
            service_link = '/etc/init.d/rc%s.d/S99autotest' % initdefault
        else:
            service_link = '/etc/rc%s.d/S99autotest' % initdefault
        try:
            if os.path.islink(service):
                os.remove(service)
            if os.path.islink(service_link):
                os.remove(service_link)
            os.symlink(rc, service)
            os.symlink(rc, service_link)
        except (OSError, IOError):
            logging.info(
                "Could not symlink init scripts (lack of permissions)")
Example #33
0
import glob
import logging
import os
import shutil
import socket
import tempfile
import time
import traceback

from autotest.client.shared import autotemp, error
from autotest.client.shared.settings import settings
from autotest.server import utils, autotest_remote
from autotest.server.hosts import remote

# Module-wide switch, read once at import time from the AUTOSERV settings
# section, controlling whether hosts share a persistent "master" SSH
# connection (consumed by the master-ssh logic elsewhere in this module).
# Defaults to disabled.
enable_master_ssh = settings.get_value('AUTOSERV',
                                       'enable_master_ssh',
                                       type=bool,
                                       default=False)


def _make_ssh_cmd_default(user="******",
                          port=22,
                          opts='',
                          hosts_file='/dev/null',
                          connect_timeout=30,
                          alive_interval=300):
    base_command = ("/usr/bin/ssh -a -x %s -o StrictHostKeyChecking=no "
                    "-o UserKnownHostsFile=%s -o BatchMode=yes "
                    "-o ConnectTimeout=%d -o ServerAliveInterval=%d "
                    "-l %s -p %d")
    assert isinstance(connect_timeout, (int, long))
    assert connect_timeout > 0  # can't disable the timeout
Example #34
0
class AbstractSSHHost(SiteHost):
    """
    This class represents a generic implementation of most of the
    framework necessary for controlling a host via ssh. It implements
    almost all of the abstract Host methods, except for the core
    Host.run method.
    """
    def _initialize(self,
                    hostname,
                    user="******",
                    port=22,
                    password="",
                    *args,
                    **dargs):
        """
        Set up the SSH connection parameters for this host.

        :param hostname: name/address of the remote host.
        :param user: SSH login user.
        :param port: SSH port.
        :param password: SSH password.
        """
        super(AbstractSSHHost, self)._initialize(hostname=hostname,
                                                 *args,
                                                 **dargs)
        self.ip = socket.getaddrinfo(self.hostname, None)[0][4][0]
        self.user = user
        self.port = port
        self.password = password
        self._use_rsync = None
        # bugfix: mkstemp() returns an *open* fd alongside the path; the old
        # code kept only the path and leaked the descriptor, one per host.
        known_hosts_fd, self.known_hosts_file = tempfile.mkstemp()
        os.close(known_hosts_fd)

        # Master SSH connection background job, socket temp directory and
        # socket control path option. If master-SSH is enabled, these fields
        # will be initialized by start_master_ssh when a new SSH connection
        # is initiated. (This used to be a stray string literal, which is a
        # no-op statement, not documentation.)
        self.master_ssh_job = None
        self.master_ssh_tempdir = None
        self.master_ssh_option = ''

    def use_rsync(self):
        """
        Tell whether rsync should be used for transfers to this host.

        The remote host is probed once via _check_rsync() and the result
        is cached in self._use_rsync for all later calls.
        """
        if self._use_rsync is None:
            # First call: probe the remote host and remember the answer so
            # future file transfers don't repeat the check.
            self._use_rsync = self._check_rsync()
            if not self._use_rsync:
                logging.warn("rsync not available on remote host %s"
                             " -- disabled", self.hostname)
        return self._use_rsync

    def _check_rsync(self):
        """
        Probe whether the rsync binary is available on the remote host.

        :return: True if running ``rsync --version`` succeeds remotely,
            False otherwise.
        """
        try:
            self.run("rsync --version", stdout_tee=None, stderr_tee=None)
            return True
        except error.AutoservRunError:
            return False

    def _encode_remote_paths(self, paths, escape=True):
        """
        Pack a list of file paths into the single ``user@host:"p1 p2"``
        remote-path form understood by rsync and scp.
        """
        if escape:
            paths = map(utils.scp_remote_escape, paths)
        joined_paths = " ".join(paths)
        return '%s@%s:"%s"' % (self.user, self.hostname, joined_paths)

    def _make_rsync_cmd(self, sources, dest, delete_dest, preserve_symlinks):
        """
        Build the rsync command line that copies ``sources`` to ``dest``.
        Remote paths must already be encoded (_encode_remote_paths).
        """
        ssh_cmd = make_ssh_command(user=self.user,
                                   port=self.port,
                                   opts=self.master_ssh_option,
                                   hosts_file=self.known_hosts_file)
        delete_flag = "--delete" if delete_dest else ""
        # -L tells rsync to follow symlinks and copy the referenced files
        symlink_flag = "" if preserve_symlinks else "-L"
        return ("rsync %s %s --timeout=1800 --rsh='%s' -az %s %s" %
                (symlink_flag, delete_flag, ssh_cmd,
                 " ".join(sources), dest))

    def _make_ssh_cmd(self, cmd):
        """
        Build a complete ssh invocation that runs ``cmd`` directly on
        this host.
        """
        base_cmd = make_ssh_command(user=self.user,
                                    port=self.port,
                                    opts=self.master_ssh_option,
                                    hosts_file=self.known_hosts_file)
        escaped_cmd = utils.sh_escape(cmd)
        return '%s %s "%s"' % (base_cmd, self.hostname, escaped_cmd)

    def _make_scp_cmd(self, sources, dest):
        """
        Build the scp command line that copies ``sources`` to ``dest``.
        Remote paths must already be encoded.
        """
        template = ("scp -rq %s -o StrictHostKeyChecking=no "
                    "-o UserKnownHostsFile=%s -P %d %s '%s'")
        return template % (self.master_ssh_option, self.known_hosts_file,
                           self.port, " ".join(sources), dest)

    def _make_rsync_compatible_globs(self, path, is_local):
        """
        Translate one rsync-style path into a list of globbed paths that
        should behave equivalently under scp.

        Only the trailing-slash convention of the get/send_file interface
        is supported, not the full rsync pattern language.

        :param path: the path to translate.
        :param is_local: True to interpret ``path`` as local, False as a
            path on the remote host.
        """
        # paths without a trailing slash need no translation
        if not path or not path.endswith("/"):
            return [path]

        # build a predicate telling whether a glob matches anything
        if is_local:

            def glob_matches_files(path, pattern):
                # local side: expand the glob directly
                return len(glob.glob(path + pattern)) > 0
        else:

            def glob_matches_files(path, pattern):
                # remote side: ask the host whether ls finds anything
                result = self.run("ls \"%s\"%s" %
                                  (utils.sh_escape(path), pattern),
                                  stdout_tee=None,
                                  ignore_status=True)
                return result.exit_status == 0

        # from globs covering all entries (plain + hidden), keep only the
        # ones that actually match files
        candidates = ["*", ".[!.]*"]
        patterns = [p for p in candidates if glob_matches_files(path, p)]

        # quote/escape the surviving globs for the command line
        if is_local:
            return ["\"%s\"%s" % (utils.sh_escape(path), pattern)
                    for pattern in patterns]
        return [utils.scp_remote_escape(path) + pattern
                for pattern in patterns]

    def _make_rsync_compatible_source(self, source, is_local):
        """
        Apply _make_rsync_compatible_globs to every path in ``source``
        and return the concatenated, properly quoted result.
        """
        quoted_paths = []
        for path in source:
            quoted_paths.extend(
                self._make_rsync_compatible_globs(path, is_local))
        return quoted_paths

    def _set_umask_perms(self, dest):
        """
        Given a destination file/dir (recursively) set the permissions on
        all the files and directories to the max allowed by running umask.
        """

        # now this looks strange but I haven't found a way in Python to _just_
        # get the umask, apparently the only option is to try to set it
        umask = os.umask(0)
        os.umask(umask)

        max_privs = 0777 & ~umask

        def set_file_privs(filename):
            file_stat = os.stat(filename)

            file_privs = max_privs
            # if the original file permissions do not have at least one
            # executable bit then do not set it anywhere
            if not file_stat.st_mode & 0111:
                file_privs &= ~0111

            os.chmod(filename, file_privs)

        # try a bottom-up walk so changes on directory permissions won't cut
        # our access to the files/directories inside it
        for root, dirs, files in os.walk(dest, topdown=False):
            # when setting the privileges we emulate the chmod "X" behaviour
            # that sets to execute only if it is a directory or any of the
            # owner/group/other already has execute right
            for dirname in dirs:
                os.chmod(os.path.join(root, dirname), max_privs)

            for filename in files:
                set_file_privs(os.path.join(root, filename))

        # now set privs for the dest itself
        if os.path.isdir(dest):
            os.chmod(dest, max_privs)
        else:
            set_file_privs(dest)

    def get_file(self,
                 source,
                 dest,
                 delete_dest=False,
                 preserve_perm=True,
                 preserve_symlinks=False):
        """
        Copy files from the remote host to a local path.

        Directories will be copied recursively.
        If a source component is a directory with a trailing slash,
        the content of the directory will be copied, otherwise, the
        directory itself and its content will be copied. This
        behavior is similar to that of the program 'rsync'.

        Args:
                source: either
                        1) a single file or directory, as a string
                        2) a list of one or more (possibly mixed)
                                files or directories
                dest: a file or a directory (if source contains a
                        directory or more than one element, you must
                        supply a directory dest)
                delete_dest: if this is true, the command will also clear
                             out any old files at dest that are not in the
                             source
                preserve_perm: tells get_file() to try to preserve the sources
                               permissions on files and dirs
                preserve_symlinks: try to preserve symlinks instead of
                                   transforming them into files/dirs on copy

        Raises:
                AutoservRunError: the scp command failed
        """

        # Start a master SSH connection if necessary.
        self.start_master_ssh()

        # normalize: callers may pass a single path or a list of paths
        if isinstance(source, basestring):
            source = [source]
        dest = os.path.abspath(dest)

        # If rsync is disabled or fails, try scp.
        try_scp = True
        if self.use_rsync():
            try:
                remote_source = self._encode_remote_paths(source)
                local_dest = utils.sh_escape(dest)
                rsync = self._make_rsync_cmd([remote_source], local_dest,
                                             delete_dest, preserve_symlinks)
                utils.run(rsync)
                try_scp = False
            except error.CmdError as e:
                # fall through to the scp path below
                logging.warn("trying scp, rsync failed: %s" % e)

        if try_scp:
            # scp has no equivalent to --delete, just drop the entire dest dir
            if delete_dest and os.path.isdir(dest):
                shutil.rmtree(dest)
                os.mkdir(dest)

            # translate rsync trailing-slash semantics into scp-safe globs
            remote_source = self._make_rsync_compatible_source(source, False)
            if remote_source:
                # _make_rsync_compatible_source() already did the escaping
                remote_source = self._encode_remote_paths(remote_source,
                                                          escape=False)
                local_dest = utils.sh_escape(dest)
                scp = self._make_scp_cmd([remote_source], local_dest)
                try:
                    utils.run(scp)
                except error.CmdError as e:
                    raise error.AutoservRunError(e.args[0], e.args[1])

        if not preserve_perm:
            # we have no way to tell scp to not try to preserve the
            # permissions so set them after copy instead.
            # for rsync we could use "--no-p --chmod=ugo=rwX" but those
            # options are only in very recent rsync versions
            self._set_umask_perms(dest)

    def send_file(self,
                  source,
                  dest,
                  delete_dest=False,
                  preserve_symlinks=False):
        """
        Copy files from a local path to the remote host.

        Directories will be copied recursively.
        If a source component is a directory with a trailing slash,
        the content of the directory will be copied, otherwise, the
        directory itself and its content will be copied. This
        behavior is similar to that of the program 'rsync'.

        Args:
                source: either
                        1) a single file or directory, as a string
                        2) a list of one or more (possibly mixed)
                                files or directories
                dest: a file or a directory (if source contains a
                        directory or more than one element, you must
                        supply a directory dest)
                delete_dest: if this is true, the command will also clear
                             out any old files at dest that are not in the
                             source
                preserve_symlinks: controls if symlinks on the source will be
                    copied as such on the destination or transformed into the
                    referenced file/directory

        Raises:
                AutoservRunError: the scp command failed
        """

        # Start a master SSH connection if necessary.
        self.start_master_ssh()

        if isinstance(source, basestring):
            source = [source]
        # bugfix: source_is_dir used to be assigned only when source was a
        # string, so passing a one-element list with delete_dest=True hit a
        # NameError below. Compute it uniformly for both call styles.
        source_is_dir = len(source) == 1 and os.path.isdir(source[0])
        remote_dest = self._encode_remote_paths([dest])

        # If rsync is disabled or fails, try scp.
        try_scp = True
        if self.use_rsync():
            try:
                local_sources = [utils.sh_escape(path) for path in source]
                rsync = self._make_rsync_cmd(local_sources, remote_dest,
                                             delete_dest, preserve_symlinks)
                utils.run(rsync)
                try_scp = False
            except error.CmdError as e:
                # fall through to the scp path below
                logging.warn("trying scp, rsync failed: %s" % e)

        if try_scp:
            # scp has no equivalent to --delete, just drop the entire dest dir
            if delete_dest:
                dest_exists = False
                try:
                    self.run("test -x %s" % dest)
                    dest_exists = True
                except error.AutoservRunError:
                    pass

                dest_is_dir = False
                if dest_exists:
                    try:
                        self.run("test -d %s" % dest)
                        dest_is_dir = True
                    except error.AutoservRunError:
                        pass

                # If there is a list of more than one path, destination *has*
                # to be a dir. If there's a single path being transferred and
                # it is a dir, the destination also has to be a dir. Therefore
                # it has to be created on the remote machine in case it doesn't
                # exist, otherwise we will have an scp failure.
                if len(source) > 1 or source_is_dir:
                    dest_is_dir = True

                if dest_exists and dest_is_dir:
                    cmd = "rm -rf %s && mkdir %s" % (dest, dest)
                    self.run(cmd)

                elif not dest_exists and dest_is_dir:
                    cmd = "mkdir %s" % dest
                    self.run(cmd)

            # translate rsync trailing-slash semantics into scp-safe globs
            local_sources = self._make_rsync_compatible_source(source, True)
            if local_sources:
                scp = self._make_scp_cmd(local_sources, remote_dest)
                try:
                    utils.run(scp)
                except error.CmdError as e:
                    raise error.AutoservRunError(e.args[0], e.args[1])

    def ssh_ping(self, timeout=60):
        """
        Verify the host answers over ssh by running a trivial command.

        :param timeout: seconds allowed for the connection and command.
        :raise AutoservSSHTimeout: if the check timed out.
        :raise AutoservSshPermissionDeniedError: if ssh permission was
            denied.
        :raise AutoservSshPingHostError: for any other ssh failure.
        """
        try:
            # Complex inheritance confuses pylint here
            # pylint: disable=E1123
            self.run("true", timeout=timeout, connect_timeout=timeout)
        except error.AutoservSSHTimeout:
            raise error.AutoservSSHTimeout(
                "Host (ssh) verify timed out (timeout = %d)" % timeout)
        except error.AutoservSshPermissionDeniedError:
            # let AutoservSshPermissionDeniedError be visible to the callers
            raise
        except error.AutoservRunError as e:
            # translate the generic run error into a ping-specific one
            raise error.AutoservSshPingHostError(e.description + '\n' +
                                                 repr(e.result_obj))

    def is_up(self):
        """
        Check if the remote host is up.

        :return: True if the remote host answers an ssh ping, False
            otherwise.
        """
        try:
            self.ssh_ping()
        except error.AutoservError:
            return False
        return True

    def wait_up(self, timeout=None):
        """
        Wait until the remote host is up or the timeout expires.

        In fact, it will wait until an ssh connection to the remote
        host can be established, and getty is running.

        :param timeout: time limit in seconds before returning even
            if the host is not up; a falsy value waits forever.

        :return: True if the host was found to be up, False otherwise
        """
        # A falsy timeout means "no deadline": the loop below then only
        # exits by returning True.
        deadline = time.time() + timeout if timeout else None

        while deadline is None or time.time() < deadline:
            if self.is_up():
                try:
                    if self.are_wait_up_processes_up():
                        logging.debug('Host %s is now up', self.hostname)
                        return True
                except error.AutoservError:
                    # ssh answered but the expected processes are not
                    # ready yet; keep polling.
                    pass
            time.sleep(1)

        # Only reachable when a deadline was set, so 'deadline' is defined;
        # timeout + now - deadline == elapsed seconds since we started.
        logging.debug('Host %s is still down after waiting %d seconds',
                      self.hostname, int(timeout + time.time() - deadline))
        return False

    def wait_down(self, timeout=None, warning_timer=None, old_boot_id=None):
        """
        Wait until the remote host is down or the timeout expires.

        If old_boot_id is provided, this will wait until either the machine
        is unpingable or self.get_boot_id() returns a value different from
        old_boot_id. If the boot_id value has changed then the function
        returns true under the assumption that the machine has shut down
        and has now already come back up.

        If old_boot_id is None then until the machine becomes unreachable the
        method assumes the machine has not yet shut down.

        :param timeout: Time limit in seconds before returning even
            if the host is still up. A falsy value waits indefinitely.
        :param warning_timer: Time limit in seconds that will generate
            a warning if the host is not down yet.
        :param old_boot_id: A string containing the result of
            self.get_boot_id() prior to the host being told to shut down.
            Can be None if this is not available.

        :return: True if the host was found to be down, False otherwise
        """
        # TODO: there is currently no way to distinguish between knowing
        # TODO: boot_id was unsupported and not knowing the boot_id.
        current_time = time.time()
        if timeout:
            end_time = current_time + timeout

        if warning_timer:
            warn_time = current_time + warning_timer

        if old_boot_id is not None:
            logging.debug('Host %s pre-shutdown boot_id is %s', self.hostname,
                          old_boot_id)

        while not timeout or current_time < end_time:
            try:
                new_boot_id = self.get_boot_id()
            except error.AutoservError:
                # An ssh failure here means the host stopped answering,
                # which is exactly what "down" means for this method.
                logging.debug('Host %s is now unreachable over ssh, is down',
                              self.hostname)
                return True
            else:
                # if the machine is up but the boot_id value has changed from
                # old boot id, then we can assume the machine has gone down
                # and then already come back up
                if old_boot_id is not None and old_boot_id != new_boot_id:
                    logging.debug(
                        'Host %s now has boot_id %s and so must '
                        'have rebooted', self.hostname, new_boot_id)
                    return True

            if warning_timer and current_time > warn_time:
                self.record("WARN", None, "shutdown",
                            "Shutdown took longer than %ds" % warning_timer)
                # Print the warning only once.
                warning_timer = None
                # If a machine is stuck switching runlevels
                # This may cause the machine to reboot.
                self.run('kill -HUP 1', ignore_status=True)

            time.sleep(1)
            current_time = time.time()

        return False

    # tunable constants for the verify & repair code
    # Minimum free disk space (in GB) required on the remote host for the
    # autotest install dir; read by verify_software() below.
    AUTOTEST_GB_DISKSPACE_REQUIRED = settings.get_value(
        "SERVER", "gb_diskspace_required", type=int, default=20)

    def verify_connectivity(self):
        """
        Verify the host is reachable: run the parent class checks, then an
        ssh ping, and fail if the host reports it is shutting down.

        :raise AutoservHostIsShuttingDownError: if the host is shutting down.
        :raise: whatever ssh_ping() or the parent checks raise on failure.
        """
        super(AbstractSSHHost, self).verify_connectivity()

        # Lazy %-args instead of eager string concatenation, consistent
        # with the other logging calls in this class.
        logging.info('Pinging host %s', self.hostname)
        self.ssh_ping()
        logging.info("Host (ssh) %s is alive", self.hostname)

        if self.is_shutting_down():
            raise error.AutoservHostIsShuttingDownError(
                "Host is shutting down")

    def verify_software(self):
        """
        Verify remote software state: parent class checks plus a disk space
        check on the autotest install directory.

        A missing autotest dir is tolerated (logged at debug level only);
        an actual disk space problem is re-raised to the caller.
        """
        super(AbstractSSHHost, self).verify_software()
        try:
            install_dir = autotest_remote.Autotest.get_install_dir(self)
            self.check_diskspace(install_dir,
                                 self.AUTOTEST_GB_DISKSPACE_REQUIRED)
        except error.AutoservHostError:
            raise  # only want to raise if it's a space issue
        except autotest_remote.AutodirNotFoundError:
            # autotest_remote dir may not exist, etc. ignore
            logging.debug('autodir space check exception, this is probably '
                          'safe to ignore\n' + traceback.format_exc())

    def close(self):
        """
        Close the host: run parent cleanup, tear down any master ssh
        connection and remove the temporary known_hosts file.
        """
        super(AbstractSSHHost, self).close()
        self._cleanup_master_ssh()
        try:
            os.remove(self.known_hosts_file)
        except OSError:
            # The temporary known_hosts file may already be gone (e.g.
            # close() called twice); its removal failing should not
            # mask the rest of the cleanup.
            pass

    def _cleanup_master_ssh(self):
        """
        Release all resources (process, temporary directory) used by an active
        master SSH connection.
        """
        # Kill the master ssh process if one is still tracked.
        ssh_job = self.master_ssh_job
        if ssh_job is not None:
            utils.nuke_subprocess(ssh_job.sp)
            self.master_ssh_job = None

        # Drop the temp dir holding the control socket and reset the ssh
        # option so later commands do not point at a dead socket.
        socket_dir = self.master_ssh_tempdir
        if socket_dir is not None:
            socket_dir.clean()
            self.master_ssh_tempdir = None
            self.master_ssh_option = ''

    def start_master_ssh(self):
        """
        Called whenever a slave SSH connection needs to be initiated (e.g., by
        run, rsync, scp). If master SSH support is enabled and a master SSH
        connection is not active already, start a new one in the background.
        Also, cleanup any zombie master SSH connections (e.g., dead due to
        reboot).
        """
        if not enable_master_ssh:
            return

        # If a previously started master SSH connection is not running
        # anymore, it needs to be cleaned up and then restarted.
        if self.master_ssh_job is not None:
            if self.master_ssh_job.sp.poll() is not None:
                logging.info("Master ssh connection to %s is down.",
                             self.hostname)
                self._cleanup_master_ssh()

        # Start a new master SSH connection.
        if self.master_ssh_job is None:
            # Create a shared socket in a temp location.
            self.master_ssh_tempdir = autotemp.tempdir(unique_id='ssh-master')
            self.master_ssh_option = ("-o ControlPath=%s/socket" %
                                      self.master_ssh_tempdir.name)

            # Start the master SSH connection in the background.
            master_cmd = self.ssh_command(options="-N -o ControlMaster=yes")
            # Lazy %-args (was an eager % format), matching the logging
            # style used elsewhere in this class.
            logging.info("Starting master ssh connection '%s'", master_cmd)
            self.master_ssh_job = utils.BgJob(master_cmd)

    def clear_known_hosts(self):
        """Clears out the temporary ssh known_hosts file.

        This is useful if the test SSHes to the machine, then reinstalls it,
        then SSHes to it again.  It can be called after the reinstall to
        reduce the spam in the logs.
        """
        logging.info("Clearing known hosts for host '%s', file '%s'.",
                     self.hostname, self.known_hosts_file)
        # Truncate the file by opening it for writing; the context manager
        # guarantees the handle is closed even if something goes wrong.
        with open(self.known_hosts_file, "w"):
            pass
Example #35
0
 def _create_test_output_dir(self, host, autodir):
     """
     Ensure the configured test output directory exists on the host.

     :param host: remote host object; used to run the mkdir command.
     :param autodir: autotest dir whose 'tmp' subdir is the default output.
     """
     fallback_dir = os.path.join(autodir, 'tmp')
     output_dir = settings.get_value('COMMON', 'test_output_dir',
                                     default=fallback_dir)
     host.run('mkdir -p %s' % utils.sh_escape(output_dir))
Example #36
0
import logging
import os
import re
import sys
import tempfile
import time
import traceback

from autotest.client import os_dep
from autotest.client import utils as client_utils
from autotest.client.shared import base_job, error, autotemp
from autotest.client.shared import packages
from autotest.client.shared.settings import settings, SettingsError
from autotest.server import installable_object, prebuild, utils

# Whether server-side prebuilding of the client is enabled
# (AUTOSERV/enable_server_prebuild config option).
autoserv_prebuild = settings.get_value('AUTOSERV', 'enable_server_prebuild',
                                       type=bool, default=False)

# Name of the client entry-point binary.
CLIENT_BINARY = 'autotest-local-streamhandler'


class AutodirNotFoundError(Exception):
    """Raised when no Autotest installation could be found."""


# Paths you'll find when autotest is installed via distro package
# NOTE(review): presumably probed to detect a packaged (system-wide)
# install as opposed to a source checkout -- confirm at call sites.
SYSTEM_WIDE_PATHS = ['/usr/bin/autotest-local',
                     '/usr/bin/autotest-local-streamhandler',
                     '/usr/bin/autotest-daemon',
                     '/usr/bin/autotest-daemon-monitor']
Example #37
0
def runjob(control, drop_caches, options):
    """
    Run a job using the given control file.

    This is the main interface to this module.

    :param control: path to the control file to run.
    :param drop_caches: forwarded to the job constructor.
    :param options: parsed command-line options object (must provide
        at least a 'cont' attribute for --continue handling).

    :see: base_job.__init__ for parameter info.
    """
    control = os.path.abspath(control)

    # The AUTODIR environment variable wins; otherwise fall back to the
    # configured autotest top path.
    try:
        autodir = os.path.abspath(os.environ['AUTODIR'])
    except KeyError:
        autodir = settings.get_value('COMMON', 'autotest_top_path')

    tmpdir = os.path.join(autodir, 'tmp')
    tests_out_dir = settings.get_value('COMMON', 'test_output_dir',
                                       default=tmpdir)
    # Per-control-file state file, used to support --continue.
    state = os.path.join(tests_out_dir, os.path.basename(control) + '.state')

    # Ensure state file is cleaned up before the job starts to run if autotest
    # is not running with the --continue flag
    if not options.cont and os.path.isfile(state):
        os.remove(state)

    # instantiate the job object ready for the control file.
    myjob = None
    try:
        # Check that the control file is valid
        if not os.path.exists(control):
            raise error.JobError(control + ": control file not found")

        # When continuing, the job is complete when there is no
        # state file, ensure we don't try and continue.
        if options.cont and not os.path.exists(state):
            raise error.JobComplete("all done")

        myjob = job(control=control, drop_caches=drop_caches, options=options)

        # Load in the users control file, may do any one of:
        #  1) execute in toto
        #  2) define steps, and select the first via next_step()
        myjob.step_engine()

    except error.JobContinue:
        sys.exit(5)

    except error.JobComplete:
        sys.exit(1)

    except error.JobError, instance:
        logging.error("JOB ERROR: " + str(instance))
        if myjob:
            command = None
            if len(instance.args) > 1:
                # The second JobError arg, when present, names the command
                # that failed; record it before aborting the job.
                command = instance.args[1]
                myjob.record('ABORT', None, command, str(instance))
            myjob.record('END ABORT', None, None, str(instance))
            assert myjob._record_indent == 0
            myjob.complete(1)
        else:
            sys.exit(1)
Example #38
0
def get_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
              exclude_atomic_group_hosts=False, valid_only=True, **filter_data):
    """
    @param multiple_labels: match hosts in all of the labels given.  Should
            be a list of label names.
    @param exclude_only_if_needed_labels: Exclude hosts with at least one
            "only_if_needed" label applied.
    @param exclude_atomic_group_hosts: Exclude hosts that have one or more
            atomic group labels associated with them.
    @param valid_only: Forwarded to rpc_utils.get_host_query.
    @param filter_data: Extra keyword filters forwarded to the host query.
    @returns Serialized list of host dicts, each augmented with labels,
            platform/atomic_group, acls, attributes and (when an install
            server is configured) profile information.
    """
    hosts = rpc_utils.get_host_query(multiple_labels,
                                     exclude_only_if_needed_labels,
                                     exclude_atomic_group_hosts,
                                     valid_only, filter_data)
    hosts = list(hosts)
    models.Host.objects.populate_relationships(hosts, models.Label,
                                               'label_list')
    models.Host.objects.populate_relationships(hosts, models.AclGroup,
                                               'acl_list')
    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
                                               'attribute_list')

    # Only a cobbler install server with an xmlrpc url is queried for
    # profile information; otherwise hosts get 'N/A' profiles below.
    install_server = None
    install_server_info = get_install_server_info()
    install_server_type = install_server_info.get('type', None)
    install_server_url = install_server_info.get('xmlrpc_url', None)

    if install_server_type == 'cobbler' and install_server_url:
        install_server = xmlrpclib.ServerProxy(install_server_url)

    host_dicts = []
    for host_obj in hosts:
        host_dict = host_obj.get_object_dict()
        host_dict['labels'] = [label.name for label in host_obj.label_list]
        host_dict['platform'], host_dict['atomic_group'] = (rpc_utils.
                find_platform_and_atomic_group(host_obj))
        host_dict['acls'] = [acl.name for acl in host_obj.acl_list]
        host_dict['attributes'] = dict((attribute.attribute, attribute.value)
                                       for attribute in host_obj.attribute_list)

        # Assume profile info could not be determined until the install
        # server lookup below succeeds for exactly one system with a
        # platform; only then is this flag cleared.
        error_encountered = True
        if install_server is not None:
            system_params = {"name": host_dict['hostname']}
            system_list = install_server.find_system(system_params, True)

            if len(system_list) < 1:
                msg = 'System "%s" not found on install server'
                rpc_logger = logging.getLogger('rpc_logger')
                rpc_logger.info(msg, host_dict['hostname'])

            elif len(system_list) > 1:
                msg = 'Found multiple systems on install server named %s'

                if install_server_type == 'cobbler':
                    msg = '%s. This should never happen on cobbler' % msg
                rpc_logger = logging.getLogger('rpc_logger')
                rpc_logger.error(msg, host_dict['hostname'])

            else:
                system = system_list[0]

                if host_dict['platform']:
                    error_encountered = False
                    profiles = sorted(install_server.get_item_names('profile'))
                    host_dict['profiles'] = profiles
                    host_dict['profiles'].insert(0, 'Do_not_install')
                    use_current_profile = settings.get_value('INSTALL_SERVER',
                            'use_current_profile', type=bool, default=True)
                    if use_current_profile:
                        host_dict['current_profile'] = system['profile']
                    else:
                        host_dict['current_profile'] = 'Do_not_install'

        if error_encountered:
            host_dict['profiles'] = ['N/A']
            host_dict['current_profile'] = 'N/A'

        host_dicts.append(host_dict)

    return rpc_utils.prepare_for_serialization(host_dicts)
Example #39
0
import cPickle
import logging
import os

try:
    import autotest.common as common  # pylint: disable=W0611
except ImportError:
    import common  # pylint: disable=W0611
from autotest.scheduler import drone_utility
from autotest.client.shared.settings import settings
from autotest.client.shared import mail

# Directory where autotest is installed on drone machines, taken from the
# SCHEDULER/drone_installation_directory configuration value.
AUTOTEST_INSTALL_DIR = settings.get_value('SCHEDULER',
                                          'drone_installation_directory')


class DroneUnreachable(Exception):
    """The drone is non-sshable."""


class _AbstractDrone(object):
    """
    Attributes:
    * allowed_users: set of usernames allowed to use this drone.  if None,
            any user can use this drone.
    """
    def __init__(self):
        self._calls = []
        self.hostname = None
        self.enabled = True
Example #40
0
    def result_notify(self, job, email_from, email_to):
        """
        Notify about the result of a job. Will always print, if email data
        is provided, will send email for it as well.

        :param job: job object to notify about.
        :param email_from: send notification email upon completion from here.
        :param email_to: send notification email upon completion to here.
        """
        if job.result is True:
            subject = 'Testing PASSED: '
        else:
            subject = 'Testing FAILED: '
        subject += '%s : %s\n' % (job.name, job.id)
        text = []
        # One summary line per (platform, status, host); failed hosts also
        # get one line per failing test with its reason.
        for platform in job.results_platform_map:
            for status in job.results_platform_map[platform]:
                if status == 'Total':
                    continue
                for host in job.results_platform_map[platform][status]:
                    text.append('%20s %10s %10s' % (platform, status, host))
                    if status == 'Failed':
                        for test_status in job.test_status[host].fail:
                            text.append('(%s, %s) : %s' %
                                        (host, test_status.test_name,
                                         test_status.reason))
                        text.append('')

        base_url = 'http://' + self.server

        # Append links to the TKO report and the AFE job view for this job.
        params = ('columns=test',
                  'rows=machine_group',
                  "condition=tag~'%s-%%25'" % job.id,
                  'title=Report')
        query_string = '&'.join(params)
        url = '%s/tko/compose_query.cgi?%s' % (base_url, query_string)
        text.append(url + '\n')
        url = '%s/afe/#tab_id=view_job&object_id=%s' % (base_url, job.id)
        text.append(url + '\n')

        # SMTP settings come from the SERVER section of the configuration.
        smtp_info = {}
        smtp_info['server'] = settings.get_value('SERVER', 'smtp_server',
                                                 default='localhost')
        smtp_info['port'] = settings.get_value('SERVER', 'smtp_port',
                                               default='')
        smtp_info['user'] = settings.get_value('SERVER', 'smtp_user',
                                               default='')
        smtp_info['password'] = settings.get_value('SERVER', 'smtp_password',
                                                   default='')

        body = '\n'.join(text)
        print '---------------------------------------------------'
        print 'Subject: ', subject
        print body
        print '---------------------------------------------------'
        if email_from and email_to:
            print 'Sending email ...'
            mail.send(from_address=email_from,
                      to_addresses=email_to,
                      subject=subject,
                      body=body,
                      smtp_info=smtp_info)
        print
Example #41
0
def runtest(job,
            url,
            tag,
            args,
            dargs,
            local_namespace={},
            global_namespace={},
            before_test_hook=None,
            after_test_hook=None,
            before_iteration_hook=None,
            after_iteration_hook=None):
    """
    Locate (installing if necessary) a test and execute it.

    :param job: the job object this test runs under.
    :param url: a plain test name, or a .tar.bz2 url of a downloadable test.
    :param tag: optional tag appended to the test output directory name.
    :param args: positional arguments forwarded to the test's _exec().
    :param dargs: keyword arguments forwarded to the test's _exec();
        'master_testpath' is popped here to build the output subdir.
    :param local_namespace: base local namespace for the import/exec below
        (copied immediately, so the caller's dict is never mutated despite
        the mutable default).
    :param global_namespace: base global namespace for the import/exec
        below (also copied immediately).
    :param before_test_hook: callable invoked with the test object before
        execution.
    :param after_test_hook: callable invoked with the test object after
        execution (even on failure).
    :param before_iteration_hook: registered on the test object, run before
        each iteration.
    :param after_iteration_hook: registered on the test object, run after
        each iteration.
    :raise error.TestError: if the test cannot be found.
    """
    local_namespace = local_namespace.copy()
    global_namespace = global_namespace.copy()

    # if this is not a plain test name then download and install the
    # specified test
    if url.endswith('.tar.bz2'):
        (testgroup, testname) = _installtest(job, url)
        bindir = os.path.join(job.testdir, 'download', testgroup, testname)
        importdir = os.path.join(job.testdir, 'download')
        modulename = '%s.%s' % (re.sub('/', '.', testgroup), testname)
        classname = '%s.%s' % (modulename, testname)
        path = testname
    else:
        # If the test is local, it may be under either testdir or site_testdir.
        # Tests in site_testdir override tests defined in testdir
        testname = path = url
        testgroup = ''
        path = re.sub(':', '/', testname)
        modulename = os.path.basename(path)
        classname = '%s.%s' % (modulename, modulename)

        # Try installing the test package
        # The job object may be either a server side job or a client side job.
        # 'install_pkg' method will be present only if it's a client side job.
        if hasattr(job, 'install_pkg'):
            try:
                bindir = os.path.join(job.site_testdir, testname)
                job.install_pkg(testname, 'test', bindir)
            except error.PackageInstallError:
                # continue as a fall back mechanism and see if the test code
                # already exists on the machine
                pass

        testdir_list = [job.testdir, getattr(job, 'site_testdir', None)]
        bindir_config = settings.get_value('COMMON', 'test_dir', default="")
        if bindir_config:
            testdir_list.extend(bindir_config.strip().split(','))

        # Last match wins, so directories later in testdir_list take
        # precedence over earlier ones.
        bindir = None
        for t_dir in testdir_list:
            if t_dir is not None and os.path.exists(os.path.join(t_dir, path)):
                importdir = bindir = os.path.join(t_dir, path)
        if not bindir:
            raise error.TestError(testname + ': test does not exist')

    subdir = os.path.join(dargs.pop('master_testpath', ""), testname)
    outputdir = os.path.join(job.resultdir, subdir)
    if tag:
        outputdir += '.' + tag

    local_namespace['job'] = job
    local_namespace['outputdir'] = outputdir
    local_namespace['bindir'] = bindir

    # Import the test module and instantiate the test class inside the
    # prepared namespaces; importdir is only temporarily on sys.path.
    sys.path.insert(0, importdir)
    try:
        exec('import %s' % modulename, local_namespace, global_namespace)
        exec("mytest = %s(job, bindir, outputdir)" % classname,
             local_namespace, global_namespace)
    finally:
        sys.path.pop(0)

    pwd = os.getcwd()
    os.chdir(outputdir)

    try:
        mytest = global_namespace['mytest']
        if before_test_hook:
            before_test_hook(mytest)

        # we use the register iteration hooks methods to register the passed
        # in hooks
        if before_iteration_hook:
            mytest.register_before_iteration_hook(before_iteration_hook)
        if after_iteration_hook:
            mytest.register_after_iteration_hook(after_iteration_hook)
        mytest._exec(args, dargs)
    finally:
        # Always restore the cwd, run the after-test hook and remove the
        # test's tmpdir, even when _exec raised.
        os.chdir(pwd)
        if after_test_hook:
            after_test_hook(mytest)
        shutil.rmtree(mytest.tmpdir, ignore_errors=True)
Example #42
0
def main():
    """
    Autoserv entry point: parse options, set up the results directory and
    logging, then run the job and exit with its exit code.
    """
    # grab the parser
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    if parser.options.no_logging:
        results = None
    else:
        output_dir = settings.get_value('COMMON',
                                        'test_output_dir',
                                        default="")
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
            if output_dir:
                results = os.path.join(output_dir, results)

        results = os.path.abspath(results)
        resultdir_exists = False
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            # Named 'error_message' (not 'error') so the imported 'error'
            # module is not shadowed by a local string.
            error_message = ("Error: results directory already exists: %s\n"
                             % results)
            sys.stderr.write(error_message)
            sys.exit(1)

        # Now that we certified that there's no leftover results dir from
        # previous jobs, lets create the result dir since the logging system
        # needs to create the log file in there.
        if not os.path.isdir(results):
            os.makedirs(results)

    logging_manager.configure_logging(
        server_logging_config.ServerLoggingConfig(),
        results_dir=results,
        use_console=not parser.options.no_tee,
        verbose=parser.options.verbose,
        no_console_prefix=parser.options.no_console_prefix)
    if results:
        # Lazy %-args (was an eager % format), matching the logging.error
        # call just below.
        logging.info("Results placed in %s", results)

        # wait until now to perform this check, so it get properly logged
        if parser.options.use_existing_results and not resultdir_exists:
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    if parser.options.write_pidfile:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest_remote.BaseAutotest.set_install_in_tmpdir(
        parser.options.install_in_tmpdir)

    exit_code = 0
    try:
        try:
            run_autoserv(pid_file_manager, results, parser)
        except SystemExit as e:
            exit_code = e.code
        except Exception:
            traceback.print_exc()
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            exit_code = 1
    finally:
        # Record the exit code in the pidfile even when the job blew up.
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
    sys.exit(exit_code)
Example #43
0
import logging
import os
import re
import signal
import sys
import time
import traceback

try:
    import autotest.common as common  # pylint: disable=W0611
except ImportError:
    import common  # pylint: disable=W0611

from autotest.client.shared.settings import settings
# AUTOSERV/require_atfork_module config flag.
# NOTE(review): presumably controls whether a missing atfork module is
# fatal -- confirm in the ImportError handler below (outside this view).
require_atfork = settings.get_value('AUTOSERV',
                                    'require_atfork_module',
                                    type=bool,
                                    default=True)

try:
    import atfork
    atfork.monkeypatch_os_fork_functions()
    import atfork.stdlib_fixer
    # Fix the Python standard library for threading+fork safety with its
    # internal locks.  http://code.google.com/p/python-atfork/
    import warnings
    warnings.filterwarnings('ignore', 'logging module already imported')
    try:
        atfork.stdlib_fixer.fix_logging_module()
    except Exception:
        pass
except ImportError as e:
Example #44
0
# -*- coding:utf-8 -*-
import os, sys
import re
import shutil
from autotest.client import test, utils
from autotest.client.shared import error

# lpt env set
from autotest.client.shared.settings import settings
# Prefer the AUTODIR environment variable; fall back to the configured
# autotest top path when it is not exported.  The previous code re-read
# os.environ['AUTODIR'] unconditionally after this block, which raised
# KeyError when AUTODIR was unset and defeated the fallback entirely.
try:
    autodir = os.path.abspath(os.environ['AUTODIR'])
except KeyError:
    autodir = settings.get_value('COMMON', 'autotest_top_path')

#lptdir = os.path.join(os.path.dirname(autodir), "lpt")
# The lpt suite lives in the 'lpts' subdir of the autotest dir; LPTROOT is
# exported for it (presumably read by the lpt libraries -- confirm there).
lptdir = os.path.join(autodir, "lpts")
os.environ['LPTROOT'] = lptdir
from autotest.client import setup_modules

# Make the lpt tree importable under the root module name "lpt".
setup_modules.setup(base_path=lptdir, root_module_name="lpt")

import lpt.api.test as lpt_test
from lpt.lib import lptlog
from lpt.lib.share import utils as lutils

unixbench_keys = [
    'Dhrystone2-using-register-variables', 'Double-Precision-Whetstone',
    'Execl-Throughput', 'FileCopy1024-bufsize2000-maxblocks',
    'FileCopy256-bufsize500-maxblocks', 'FileCopy4096-bufsize8000-maxblocks',
Example #45
0
 def get_fetch_location(self):
     """
     Return the PACKAGES/fetch_location setting as a reversed list.

     :return: configured repo list in reverse order (empty list when the
         setting is absent).
     """
     locations = settings.get_value("PACKAGES", 'fetch_location', type=list,
                                    default=[])
     # Reversed in place so later config entries come first (presumably
     # priority order -- confirm with callers).
     locations.reverse()
     return locations
Example #46
0
def get_mappings_2x():
    """
    Build the kernel-version -> candidate-URL mappings for 2.x kernels.

    Each entry is a list [version_regex, base_template, flag, url_templates]
    whose exact field semantics are defined by the consumers of this table;
    url_templates contains one URL template per configured mirror, with
    %(major)s / %(minor)s / %(base)s / %(full)s placeholders.

    :return: list of mapping entries (url_templates lists are empty when
        the corresponding mirror settings are unset).
    """
    kernel_urls = settings.get_value('CLIENT', 'kernel_mirror',
                                     default='').split()
    gitweb_urls = settings.get_value('CLIENT', 'kernel_gitweb',
                                     default='').split()
    stable_gitweb_urls = settings.get_value('CLIENT', 'stable_kernel_gitweb',
                                            default='').split()

    # List comprehensions instead of map(lambda ...) + map(lambda ...):
    # identical results under Python 2, more readable, and still valid
    # under Python 3 where map objects cannot be concatenated with '+'.
    def expand(bases, suffix):
        # One URL template per mirror base, with the suffix appended.
        return [base + suffix for base in bases]

    mappings_2x = [
        [r'^\d+\.\d+$', '', True,
         expand(kernel_urls, 'v%(major)s/linux-%(full)s.tar.bz2') +
         expand(gitweb_urls, ';a=snapshot;h=refs/tags/v%(full)s;sf=tgz')],
        [r'^\d+\.\d+\.\d+$', '', True,
         expand(kernel_urls, 'v%(major)s/linux-%(full)s.tar.bz2') +
         expand(gitweb_urls, ';a=snapshot;h=refs/tags/v%(full)s;sf=tgz')],
        [r'^\d+\.\d+\.\d+\.\d+$', '', True,
         expand(kernel_urls, 'v%(major)s/linux-%(full)s.tar.bz2') +
         expand(stable_gitweb_urls,
                ';a=snapshot;h=refs/tags/v%(full)s;sf=tgz')],
        [r'-rc\d+$', '%(minor-prev)s', True,
         expand(kernel_urls,
                'v%(major)s/testing/v%(minor)s/linux-%(full)s.tar.bz2') +
         expand(kernel_urls, 'v%(major)s/testing/linux-%(full)s.tar.bz2') +
         expand(gitweb_urls, ';a=snapshot;h=refs/tags/v%(full)s;sf=tgz')],
        [r'-(git|bk)\d+$', '%(base)s', False,
         expand(kernel_urls, 'v%(major)s/snapshots/old/patch-%(full)s.bz2') +
         expand(kernel_urls, 'v%(major)s/snapshots/patch-%(full)s.bz2')],
        [r'-mm\d+$', '%(base)s', False,
         expand(kernel_urls,
                'people/akpm/patches/'
                '%(major)s/%(base)s/%(full)s/%(full)s.bz2')],
        [r'-mjb\d+$', '%(base)s', False,
         expand(kernel_urls, 'people/mbligh/%(base)s/patch-%(full)s.bz2')],
        [r'[a-f0-9]{7,40}$', '', True,
         expand(gitweb_urls, ';a=snapshot;h=%(full)s;sf=tgz') +
         expand(stable_gitweb_urls, ';a=snapshot;h=%(full)s;sf=tgz')]
    ]

    return mappings_2x
Example #47
0
 def get_client_autodir_paths(cls, host):
     """
     Return the configured candidate autodir paths on client machines.

     :param host: accepted but not used by this implementation.
     :return: list from the AUTOSERV/client_autodir_paths setting.
     """
     return settings.get_value('AUTOSERV', 'client_autodir_paths', type=list)