Example #1
def _usable_ro_dir(directory):
    """
    Verify whether dir exists and we can access its contents.

    If a usable RO is there, use it no questions asked. If not, let's at
    least try to create one.

    :param directory: Directory
    """
    cwd = os.getcwd()
    if os.path.isdir(directory):
        try:
            os.chdir(directory)
            os.chdir(cwd)
            return True
        except OSError:
            pass
    else:
        try:
            utils_path.init_dir(directory)
            return True
        except OSError:
            pass

    return False
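Note: every example in this listing leans on the same helper, imported either as utils_path.init_dir or path.init_dir. Judging only from how it is called here (variadic path components, the created path returned, OSError raised on failure), a minimal sketch of such a helper could look like the following; the real avocado.utils.path implementation may differ in details:

import errno
import os


def init_dir(*args):
    """Join the arguments into one path, create the directory if missing,
    and return the resulting path.

    Sketch only, inferred from the call sites in these examples; not the
    actual avocado implementation.
    """
    directory = os.path.join(*args)
    if not os.path.isdir(directory):
        try:
            os.makedirs(directory)
        except OSError as details:
            # Tolerate a concurrent creator; any other failure propagates,
            # which is why several callers wrap init_dir() in
            # try/except OSError.
            if details.errno != errno.EEXIST:
                raise
    return directory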
Example #3
    def save(self):
        """
        Store script to file system.

        :return: `True` if script has been stored, otherwise `False`.
        """
        dirname = os.path.dirname(self.path)
        utils_path.init_dir(dirname)
        with open(self.path, 'w') as fd:
            fd.write(self.content)
            os.chmod(self.path, self.mode)
            self.stored = True
        return self.stored
Example #4
    def save(self):
        """
        Store script to file system.

        :return: `True` if script has been stored, otherwise `False`.
        """
        dirname = os.path.dirname(self.path)
        utils_path.init_dir(dirname)
        with open(self.path, self.open_mode) as fd:
            fd.write(self.content)
            os.chmod(self.path, self.mode)
            self.stored = True
        return self.stored
Example #5
    def workdir(self):
        """
        This property returns a writable directory that exists during
        the entire test execution, but will be cleaned up once the
        test finishes.

        It can be used on tasks such as decompressing source tarballs,
        building software, etc.
        """
        if self.__workdir is None:
            self.__workdir = os.path.join(self._base_tmpdir, self.name.str_filesystem)
            utils_path.init_dir(self.__workdir)
            self.log.debug("Test workdir initialized at: %s", self.__workdir)
        return self.__workdir
Example #6
    def __init__(self, config_path=None):
        """
        Constructor. Tries to find the main settings file and load it.

        :param config_path: Path to a config file. Useful for unittesting.
        """
        self.config = ConfigParser.ConfigParser()
        self.intree = False
        self.config_paths = []
        self.config_paths_failed = []
        if config_path is None:
            config_system = os.path.exists(config_path_system)
            config_system_extra = os.path.exists(_config_dir_system_extra)
            config_local = os.path.exists(config_path_local)
            config_intree = os.path.exists(config_path_intree)
            config_intree_extra = os.path.exists(_config_path_intree_extra)
            if (not config_system) and (not config_local) and (
                    not config_intree):
                raise ConfigFileNotFound([
                    config_path_system, config_path_local, config_path_intree
                ])
            if config_intree:
                # In this case, respect only the intree config
                self._process_config_path(config_path_intree)
                if config_intree_extra:
                    for extra_file in glob.glob(
                            os.path.join(_config_path_intree_extra, '*.conf')):
                        self._process_config_path(extra_file)
                self.intree = True
            else:
                # In this case, load first the global config, then the
                # local config overrides the global one
                if config_system:
                    self._process_config_path(config_path_system)
                    if config_system_extra:
                        for extra_file in glob.glob(
                                os.path.join(_config_dir_system_extra,
                                             '*.conf')):
                            self._process_config_path(extra_file)
                if not config_local:
                    path.init_dir(_config_dir_local)
                    with open(config_path_local, 'w') as config_local_fileobj:
                        config_local_fileobj.write(
                            '# You can use this file to override configuration values from '
                            '%s and %s\n' %
                            (config_path_system, _config_dir_system_extra))
                self._process_config_path(config_path_local)
        else:
            # Unittests
            self._process_config_path(config_path)
Example #7
 def __init__(self, ec2_instance, ec2_service, credentials,
              node_prefix='node', node_index=1, ami_username='******',
              base_logdir=None):
     self.instance = ec2_instance
     self.name = '%s-%s' % (node_prefix, node_index)
     try:
         self.logdir = path.init_dir(base_logdir, self.name)
     except OSError:
         self.logdir = os.path.join(base_logdir, self.name)
     self.ec2 = ec2_service
     self._instance_wait_safe(self.instance.wait_until_running)
     self.wait_public_ip()
     self.is_seed = None
     self.ec2.create_tags(Resources=[self.instance.id],
                          Tags=[{'Key': 'Name', 'Value': self.name}])
     # Make the created instance immune to Tzach's killer script
     self.ec2.create_tags(Resources=[self.instance.id],
                          Tags=[{'Key': 'keep', 'Value': 'alive'}])
     self.remoter = Remote(hostname=self.instance.public_ip_address,
                           user=ami_username,
                           key_file=credentials.key_file)
     logger = logging.getLogger('avocado.test')
     self.log = SDCMAdapter(logger, extra={'prefix': str(self)})
     self.log.debug("SSH access -> 'ssh -i %s %s@%s'",
                    credentials.key_file, ami_username,
                    self.instance.public_ip_address)
     self._journal_thread = None
     self.start_journal_thread()
     # We'll assume 0 coredumps for starters; if by chance there
     # are already coredumps in there, the coredump backtrace
     # code will report all of them.
     self._n_coredumps = 0
     self._backtrace_thread = None
     self.start_backtrace_thread()
Example #8
def get_job_logs_dir(args=None, unique_id=None):
    """
    Create a log directory for a job, or a stand alone execution of a test.

    Also, symlink the created dir with [avocado-logs-dir]/latest.

    :param args: :class:`argparse.Namespace` instance with cmdline arguments
                 (optional).
    :param unique_id: The unique identification. If `None`, create one.
    :rtype: basestring
    """
    start_time = time.strftime('%Y-%m-%dT%H.%M')
    if args is not None:
        logdir = args.logdir or get_logs_dir()
    else:
        logdir = get_logs_dir()
    # Stand alone tests handling
    if unique_id is None:
        unique_id = job_id.create_unique_job_id()

    debugbase = 'job-%s-%s' % (start_time, unique_id[:7])
    debugdir = path.init_dir(logdir, debugbase)
    latestdir = os.path.join(logdir, "latest")
    try:
        os.unlink(latestdir)
    except OSError:
        pass
    os.symlink(debugbase, latestdir)
    return debugdir
Example #9
 def __init__(self, queue, runnable):
     self.__vt_params = utils_params.Params(runnable.kwargs)
     self.queue = queue
     self.tmpdir = tempfile.mkdtemp()
     self.logdir = os.path.join(self.tmpdir, 'results')
     path.init_dir(self.logdir)
     self.logfile = os.path.join(self.logdir, 'debug.log')
     self.log = output.LOG_JOB
     self.log_level = runnable.config.get('job.output.loglevel',
                                          logging.DEBUG)
     self.env_version = utils_env.get_env_version()
     self.iteration = 0
     self.background_errors = error_event.error_events_bus
     # clear existing error events
     self.background_errors.clear()
     self.debugdir = self.logdir
     self.bindir = data_dir.get_root_dir()
     self.virtdir = os.path.join(self.bindir, 'shared')
Example #10
    def __init__(self, config_path=None):
        """
        Constructor. Tries to find the main settings file and load it.

        :param config_path: Path to a config file. Useful for unittesting.
        """
        self.config = ConfigParser.ConfigParser()
        self.intree = False
        self.config_paths = []
        self.config_paths_failed = []
        if config_path is None:
            config_system = os.path.exists(config_path_system)
            config_system_extra = os.path.exists(_config_dir_system_extra)
            config_local = os.path.exists(config_path_local)
            config_intree = os.path.exists(config_path_intree)
            config_intree_extra = os.path.exists(_config_path_intree_extra)
            if (not config_system) and (not config_local) and (not config_intree):
                raise ConfigFileNotFound([config_path_system,
                                          config_path_local,
                                          config_path_intree])
            if config_intree:
                # In this case, respect only the intree config
                self._process_config_path(config_path_intree)
                if config_intree_extra:
                    for extra_file in glob.glob(os.path.join(_config_path_intree_extra, '*.conf')):
                        self._process_config_path(extra_file)
                self.intree = True
            else:
                # In this case, load first the global config, then the
                # local config overrides the global one
                if config_system:
                    self._process_config_path(config_path_system)
                    if config_system_extra:
                        for extra_file in glob.glob(os.path.join(_config_dir_system_extra, '*.conf')):
                            self._process_config_path(extra_file)
                if not config_local:
                    path.init_dir(_config_dir_local)
                    with open(config_path_local, 'w') as config_local_fileobj:
                        config_local_fileobj.write('# You can use this file to override configuration values from '
                                                   '%s and %s\n' % (config_path_system, _config_dir_system_extra))
                self._process_config_path(config_path_local)
        else:
            # Unittests
            self._process_config_path(config_path)
Example #11
    def _render_report(self):
        context = ReportModel(json_input=self.json, html_output=self.output)
        html = HTML()
        template = html.get_resource_path('templates', 'report.mustache')

        # pylint: disable=E0611
        if hasattr(pystache, 'Renderer'):
            renderer = pystache.Renderer('utf-8', 'utf-8')
            report_contents = renderer.render(
                open(template, 'r').read(), context)
        else:
            from pystache import view
            v = view.View(open(template, 'r').read(), context)
            report_contents = v.render('utf8')

        static_basedir = html.get_resource_path('static')
        output_dir = os.path.dirname(os.path.abspath(self.output))
        utils_path.init_dir(output_dir)
        for resource_dir in os.listdir(static_basedir):
            res_dir = os.path.join(static_basedir, resource_dir)
            out_dir = os.path.join(output_dir, resource_dir)
            if os.path.exists(out_dir):
                shutil.rmtree(out_dir)
            shutil.copytree(res_dir, out_dir)
        with codecs.open(self.output, 'w', 'utf-8') as report_file:
            report_file.write(report_contents)

        if self.args is not None:
            if getattr(self.args, 'open_browser'):
                # if possible, put browser in separate process group, so
                # keyboard interrupts don't affect browser as well as Python
                setsid = getattr(os, 'setsid', None)
                if not setsid:
                    setsid = getattr(os, 'setpgrp', None)
                inout = file(os.devnull, "r+")
                cmd = ['xdg-open', self.output]
                subprocess.Popen(cmd,
                                 close_fds=True,
                                 stdin=inout,
                                 stdout=inout,
                                 stderr=inout,
                                 preexec_fn=setsid)
Example #12
    def __init__(self, job, test_result):
        """
        Creates an instance of TestRunner class.

        :param job: an instance of :class:`avocado.job.Job`.
        :param test_result: an instance of :class:`avocado.result.TestResult`.
        """
        self.job = job
        self.result = test_result
        sysinfo_dir = path.init_dir(self.job.logdir, 'sysinfo')
        self.sysinfo = sysinfo.SysInfo(basedir=sysinfo_dir)
Example #13
 def _screendump_thread_start(self):
     self._screendump_thread_enable = self.params.get(
         'enable', '/plugins/virt/screendumps/*')
     self._video_enable = self.params.get('enable',
                                          '/plugins/virt/videos/*')
     if self._screendump_thread_enable:
         self.screendump_dir = utils_path.init_dir(
             os.path.join(self.logdir, 'screendumps', self.short_id))
         self._screendump_terminate = threading.Event()
         self._screendump_thread = threading.Thread(
             target=self._take_screendumps, name='VmScreendumps')
         self._screendump_thread.start()
Example #14
 def _screendump_thread_start(self):
     self._screendump_thread_enable = self.params.get('enable',
                                                      '/plugins/virt/screendumps/*')
     self._video_enable = self.params.get('enable',
                                          '/plugins/virt/videos/*')
     if self._screendump_thread_enable:
         self.screendump_dir = utils_path.init_dir(
             os.path.join(self.logdir, 'screendumps', self.short_id))
         self._screendump_terminate = threading.Event()
         self._screendump_thread = threading.Thread(target=self._take_screendumps,
                                                    name='VmScreendumps')
         self._screendump_thread.start()
Example #15
    def __init__(self,
                 ec2_ami_id,
                 ec2_subnet_id,
                 ec2_security_group_ids,
                 service,
                 credentials,
                 cluster_uuid=None,
                 ec2_instance_type='c4.xlarge',
                 ec2_ami_username='******',
                 ec2_user_data='',
                 ec2_block_device_mappings=None,
                 cluster_prefix='cluster',
                 node_prefix='node',
                 n_nodes=10,
                 params=None):
        global CREDENTIALS
        CREDENTIALS.append(credentials)

        self.ec2_ami_id = ec2_ami_id
        self.ec2_subnet_id = ec2_subnet_id
        self.ec2_security_group_ids = ec2_security_group_ids
        self.ec2 = service
        self.credentials = credentials
        self.ec2_instance_type = ec2_instance_type
        self.ec2_ami_username = ec2_ami_username
        if ec2_block_device_mappings is None:
            ec2_block_device_mappings = []
        self.ec2_block_device_mappings = ec2_block_device_mappings
        self.ec2_user_data = ec2_user_data
        self.node_prefix = node_prefix
        self.ec2_ami_id = ec2_ami_id
        if cluster_uuid is None:
            self.uuid = uuid.uuid4()
        else:
            self.uuid = cluster_uuid
        self.shortid = str(self.uuid)[:8]
        self.name = '%s-%s' % (cluster_prefix, self.shortid)
        # I wanted to avoid some parameter passing
        # from the tester class to the cluster test.
        assert 'AVOCADO_TEST_LOGDIR' in os.environ
        try:
            self.logdir = path.init_dir(os.environ['AVOCADO_TEST_LOGDIR'],
                                        self.name)
        except OSError:
            self.logdir = os.path.join(os.environ['AVOCADO_TEST_LOGDIR'],
                                       self.name)
        logger = logging.getLogger('avocado.test')
        self.log = SDCMAdapter(logger, extra={'prefix': str(self)})
        self.log.info('Init nodes')
        self.nodes = []
        self.params = params
        self.add_nodes(n_nodes)
Example #16
 def node_run_stress(node):
     try:
         logdir = path.init_dir(output_dir, self.name)
     except OSError:
         logdir = os.path.join(output_dir, self.name)
     result = node.remoter.run(cmd=stress_cmd, timeout=timeout,
                               ignore_status=True)
     log_file_name = os.path.join(logdir, 'cassandra-stress-%s.log' % uuid.uuid4())
     self.log.debug('Writing cassandra-stress log %s', log_file_name)
     with open(log_file_name, 'w') as log_file:
         log_file.write(str(result))
     queue.put((node, result))
     queue.task_done()
Example #17
def create_job_logs_dir(base_dir=None, unique_id=None):
    """
    Create a log directory for a job, or a stand alone execution of a test.

    :param base_dir: Base log directory, if `None`, use value from configuration.
    :param unique_id: The unique identification. If `None`, create one.
    :rtype: str
    """
    start_time = time.strftime('%Y-%m-%dT%H.%M')
    if base_dir is None:
        base_dir = get_logs_dir()
        if not base_dir:
            LOG_UI.error("No writable location for logs found, use "
                         "'avocado config --datadir' to get the "
                         "locations and check system permissions.")
            sys.exit(exit_codes.AVOCADO_FAIL)
    if not os.path.exists(base_dir):
        utils_path.init_dir(base_dir)
    # Stand alone tests handling
    if unique_id is None:
        unique_id = job_id.create_unique_job_id()

    logdir = os.path.join(base_dir, 'job-%s-%s' % (start_time, unique_id[:7]))
    for i in range(7, len(unique_id)):
        try:
            os.mkdir(logdir)
        except OSError:
            logdir += unique_id[i]
            continue
        return logdir
    logdir += "."
    for i in range(1000):
        try:
            os.mkdir(logdir + str(i))
        except OSError:
            continue
        return logdir + str(i)
    raise IOError("Unable to create unique logdir in 1000 iterations: %s" %
                  (logdir))
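A detail of Example #17 that is easy to miss: name collisions are resolved by appending one more character of the unique job id per failed os.mkdir(), and once the id is exhausted a numeric '.0' .. '.999' suffix is tried before giving up. With a made-up timestamp and id, the directory names attempted would be:

    job-2024-01-01T00.00-3a5f9c1        # unique_id[:7], first attempt
    job-2024-01-01T00.00-3a5f9c1d       # one collision: unique_id[:8]
    ...
    job-2024-01-01T00.00-<full id>.0    # id exhausted, numeric fallback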
Example #18
File: genio.py Project: ypu/avocado
def log_line(filename, line):
    """
    Write a line to a file.

    :param filename: Path of file to write to, either absolute or relative to
                     the dir set by set_log_file_dir().
    :param line: Line to write.
    """
    global _open_log_files, _log_file_dir

    path = utils_path.get_path(_log_file_dir, filename)
    if path not in _open_log_files:
        # First, let's close the log files opened in old directories
        close_log_file(filename)
        # Then, let's open the new file
        try:
            utils_path.init_dir(os.path.dirname(path))
        except OSError:
            pass
        _open_log_files[path] = open(path, "w")
    timestr = time.strftime("%Y-%m-%d %H:%M:%S")
    _open_log_files[path].write("%s: %s\n" % (timestr, line))
    _open_log_files[path].flush()
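The example above also relies on set_log_file_dir() (mentioned in its docstring) and close_log_file() (called in its body). A hypothetical usage, with the directory and messages invented for illustration:

    set_log_file_dir('/tmp/session-logs')            # sets _log_file_dir
    log_line('boot.log', 'guest started')            # writes '<timestamp>: guest started'
    log_line('boot.log', 'guest reachable via ssh')  # appended to the same open file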
Example #19
 def _screendump_thread_start(self):
     thread_enable = 'avocado.args.run.screendump_thread.enable'
     self._screendump_thread_enable = self.params.get(
         thread_enable, defaults.screendump_thread_enable)
     video_enable = 'avocado.args.run.video_encoding.enable'
     self._video_enable = self.params.get(video_enable,
                                          defaults.video_encoding_enable)
     if self._screendump_thread_enable:
         self.screendump_dir = utils_path.init_dir(
             os.path.join(self.logdir, 'screendumps', self.short_id))
         self._screendump_terminate = threading.Event()
         self._screendump_thread = threading.Thread(
             target=self._take_screendumps, name='VmScreendumps')
         self._screendump_thread.start()
Example #21
    def _render_report(self):
        context = ReportModel(json_input=self.json, html_output=self.output)
        html = HTML()
        template = html.get_resource_path('templates', 'report.mustache')

        # pylint: disable=E0611
        if hasattr(pystache, 'Renderer'):
            renderer = pystache.Renderer('utf-8', 'utf-8')
            report_contents = renderer.render(open(template, 'r').read(), context)
        else:
            from pystache import view
            v = view.View(open(template, 'r').read(), context)
            report_contents = v.render('utf8')

        static_basedir = html.get_resource_path('static')
        output_dir = os.path.dirname(os.path.abspath(self.output))
        utils_path.init_dir(output_dir)
        for resource_dir in os.listdir(static_basedir):
            res_dir = os.path.join(static_basedir, resource_dir)
            out_dir = os.path.join(output_dir, resource_dir)
            if os.path.exists(out_dir):
                shutil.rmtree(out_dir)
            shutil.copytree(res_dir, out_dir)
        with codecs.open(self.output, 'w', 'utf-8') as report_file:
            report_file.write(report_contents)

        if self.args is not None:
            if getattr(self.args, 'open_browser'):
                # if possible, put browser in separate process group, so
                # keyboard interrupts don't affect browser as well as Python
                setsid = getattr(os, 'setsid', None)
                if not setsid:
                    setsid = getattr(os, 'setpgrp', None)
                inout = file(os.devnull, "r+")
                cmd = ['xdg-open', self.output]
                subprocess.Popen(cmd, close_fds=True, stdin=inout, stdout=inout,
                                 stderr=inout, preexec_fn=setsid)
Example #22
def _usable_rw_dir(directory):
    """
    Verify whether we can use this dir (read/write).

    Checks for appropriate permissions, and creates missing dirs as needed.

    :param directory: Directory
    """
    if os.path.isdir(directory):
        try:
            fd, path = tempfile.mkstemp(dir=directory)
            os.close(fd)
            os.unlink(path)
            return True
        except OSError:
            pass
    else:
        try:
            utils_path.init_dir(directory)
            return True
        except OSError:
            pass

    return False
Example #24
    def __init__(self, basedir=None, log_packages=None, profiler=None):
        """
        Set sysinfo collectibles.

        :param basedir: Base log dir where sysinfo files will be located.
        :param log_packages: Whether to log system packages (optional because
                             logging packages is a costly operation). If not
                             given explicitly, tries to look in the config
                             files, and if not found, defaults to False.
        :param profiler: Whether to use the profiler. If not given explicitly,
                         tries to look in the config files.
        """
        self.config = settings.as_dict()

        if basedir is None:
            basedir = utils_path.init_dir("sysinfo")
        self.basedir = basedir

        self._installed_pkgs = None
        if log_packages is None:
            packages_namespace = "sysinfo.collect.installed_packages"
            self.log_packages = self.config.get(packages_namespace)
        else:
            self.log_packages = log_packages

        self._get_collectibles(profiler)

        self.start_collectibles = set()
        self.end_collectibles = set()
        self.end_fail_collectibles = set()

        self.pre_dir = utils_path.init_dir(self.basedir, "pre")
        self.post_dir = utils_path.init_dir(self.basedir, "post")
        self.profile_dir = utils_path.init_dir(self.basedir, "profile")

        self._set_collectibles()
Example #25
 def node_run_stress(node):
     try:
         logdir = path.init_dir(output_dir, self.name)
     except OSError:
         logdir = os.path.join(output_dir, self.name)
     result = node.remoter.run(cmd=stress_cmd,
                               timeout=timeout,
                               ignore_status=True,
                               watch_stdout_pattern='total,')
     node.cs_start_time = result.stdout_pattern_found_at
     log_file_name = os.path.join(
         logdir, 'cassandra-stress-%s.log' % uuid.uuid4())
     self.log.debug('Writing cassandra-stress log %s', log_file_name)
     with open(log_file_name, 'w') as log_file:
         log_file.write(str(result))
     queue.put((node, result))
     queue.task_done()
Example #26
def record(job, cmdline=None):
    """
    Records all required job information.
    """
    base_dir = init_dir(job.logdir, JOB_DATA_DIR)
    path_cfg = os.path.join(base_dir, CONFIG_FILENAME)
    path_references = os.path.join(base_dir, TEST_REFERENCES_FILENAME)
    path_pwd = os.path.join(base_dir, PWD_FILENAME)
    path_job_config = os.path.join(base_dir, JOB_CONFIG_FILENAME)
    path_cmdline = os.path.join(base_dir, CMDLINE_FILENAME)

    references = job.config.get('resolver.references')
    if references:
        with open(path_references, 'w', encoding='utf-8') as references_file:
            references_file.write(f'{references}')
            references_file.flush()
            os.fsync(references_file)

    with open(path_cfg, 'w', encoding='utf-8') as config_file:
        settings.config.write(config_file)
        config_file.flush()
        os.fsync(config_file)

    for idx, suite in enumerate(job.test_suites, 1):
        if suite.name:
            suite_var_name = f"variants-{idx}-{suite.name}.json"
        else:
            suite_var_name = f"variants-{idx}.json"
        suite_var_name = string_to_safe_path(suite_var_name)
        path_suite_variant = os.path.join(base_dir, suite_var_name)
        record_suite_variant(path_suite_variant, suite)

    with open(path_pwd, 'w', encoding='utf-8') as pwd_file:
        pwd_file.write(f'{os.getcwd()}')
        pwd_file.flush()
        os.fsync(pwd_file)

    with open(path_job_config, 'w', encoding='utf-8') as job_config_file:
        json.dump(job.config, job_config_file, cls=ConfigEncoder)
        job_config_file.flush()
        os.fsync(job_config_file)

    with open(path_cmdline, 'w', encoding='utf-8') as cmdline_file:
        cmdline_file.write(f'{cmdline}')
        cmdline_file.flush()
        os.fsync(cmdline_file)
Example #27
 def __init__(self,
              ec2_instance,
              ec2_service,
              credentials,
              node_prefix='node',
              node_index=1,
              ami_username='******',
              base_logdir=None):
     self.instance = ec2_instance
     self.name = '%s-%s' % (node_prefix, node_index)
     try:
         self.logdir = path.init_dir(base_logdir, self.name)
     except OSError:
         self.logdir = os.path.join(base_logdir, self.name)
     self.ec2 = ec2_service
     self._instance_wait_safe(self.instance.wait_until_running)
     self.wait_public_ip()
     self.is_seed = None
     self.ec2.create_tags(Resources=[self.instance.id],
                          Tags=[{
                              'Key': 'Name',
                              'Value': self.name
                          }])
     # Make the created instance immune to Tzach's killer script
     self.ec2.create_tags(Resources=[self.instance.id],
                          Tags=[{
                              'Key': 'keep',
                              'Value': 'alive'
                          }])
     self.remoter = Remote(hostname=self.instance.public_ip_address,
                           user=ami_username,
                           key_file=credentials.key_file)
     logger = logging.getLogger('avocado.test')
     self.log = SDCMAdapter(logger, extra={'prefix': str(self)})
     self.log.debug("SSH access -> 'ssh -i %s %s@%s'", credentials.key_file,
                    ami_username, self.instance.public_ip_address)
     self._journal_thread = None
     self.start_journal_thread()
     # We'll assume 0 coredumps for starters; if by chance there
     # are already coredumps in there, the coredump backtrace
     # code will report all of them.
     self._n_coredumps = 0
     self._backtrace_thread = None
     self.start_backtrace_thread()
     self.cs_start_time = None
Example #28
def create_job_logs_dir(logdir=None, unique_id=None):
    """
    Create a log directory for a job, or a stand alone execution of a test.

    :param logdir: Base log directory, if `None`, use value from configuration.
    :param unique_id: The unique identification. If `None`, create one.
    :rtype: basestring
    """
    start_time = time.strftime('%Y-%m-%dT%H.%M')
    if logdir is None:
        logdir = get_logs_dir()
    # Stand alone tests handling
    if unique_id is None:
        unique_id = job_id.create_unique_job_id()

    debugbase = 'job-%s-%s' % (start_time, unique_id[:7])
    debugdir = utils_path.init_dir(logdir, debugbase)
    return debugdir
Example #30
    def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids,
                 service, credentials, cluster_uuid=None,
                 ec2_instance_type='c4.xlarge', ec2_ami_username='******',
                 ec2_user_data='', ec2_block_device_mappings=None,
                 cluster_prefix='cluster',
                 node_prefix='node', n_nodes=10, params=None):
        global CREDENTIALS
        CREDENTIALS.append(credentials)

        self.ec2_ami_id = ec2_ami_id
        self.ec2_subnet_id = ec2_subnet_id
        self.ec2_security_group_ids = ec2_security_group_ids
        self.ec2 = service
        self.credentials = credentials
        self.ec2_instance_type = ec2_instance_type
        self.ec2_ami_username = ec2_ami_username
        if ec2_block_device_mappings is None:
            ec2_block_device_mappings = []
        self.ec2_block_device_mappings = ec2_block_device_mappings
        self.ec2_user_data = ec2_user_data
        self.node_prefix = node_prefix
        self.ec2_ami_id = ec2_ami_id
        if cluster_uuid is None:
            self.uuid = uuid.uuid4()
        else:
            self.uuid = cluster_uuid
        self.shortid = str(self.uuid)[:8]
        self.name = '%s-%s' % (cluster_prefix, self.shortid)
        # I wanted to avoid some parameter passing
        # from the tester class to the cluster test.
        assert 'AVOCADO_TEST_LOGDIR' in os.environ
        try:
            self.logdir = path.init_dir(os.environ['AVOCADO_TEST_LOGDIR'],
                                        self.name)
        except OSError:
            self.logdir = os.path.join(os.environ['AVOCADO_TEST_LOGDIR'],
                                       self.name)
        logger = logging.getLogger('avocado.test')
        self.log = SDCMAdapter(logger, extra={'prefix': str(self)})
        self.log.info('Init nodes')
        self.nodes = []
        self.params = params
        self.add_nodes(n_nodes)
Example #31
def get_job_logs_dir(args=None, unique_id=None):
    """
    Create a log directory for a job, or a stand alone execution of a test.

    :param args: :class:`argparse.Namespace` instance with cmdline arguments
                 (optional).
    :param unique_id: The unique identification. If `None`, create one.
    :rtype: basestring
    """
    start_time = time.strftime('%Y-%m-%dT%H.%M')
    if args is not None:
        logdir = args.logdir or get_logs_dir()
    else:
        logdir = get_logs_dir()
    # Stand alone tests handling
    if unique_id is None:
        unique_id = job_id.create_unique_job_id()

    debugbase = 'job-%s-%s' % (start_time, unique_id[:7])
    debugdir = utils_path.init_dir(logdir, debugbase)
    return debugdir
Example #32
    def run_stress(self, stress_cmd, timeout, output_dir):
        def check_output(result_obj, node):
            output = result_obj.stdout + result_obj.stderr
            lines = output.splitlines()
            for line in lines:
                if 'java.io.IOException' in line:
                    return ['{}:{}'.format(node, line.strip())]
            return []

        print("Running {} in all loaders, timeout {} s".format(stress_cmd,
                                                               timeout))
        logdir = path.init_dir(output_dir, self.name)
        result_dict = self.run_all_nodes(stress_cmd, timeout=timeout)
        errors = []
        for node in self.nodes:
            result = result_dict[node.instance.public_ip_address]
            log_file_name = os.path.join(logdir,
                                         '{}.log'.format(node.name))
            print("Writing log file {}".format(log_file_name))
            with open(log_file_name, 'w') as log_file:
                log_file.write(result.stdout)
            errors += check_output(result, node)
        return errors
Example #33
    def __init__(self, methodName='test', name=None, params=None,
                 base_logdir=None, config=None, runner_queue=None, tags=None):
        """
        Initializes the test.

        :param methodName: Name of the main method to run. For the sake of
                           compatibility with the original unittest class,
                           you should not set this.
        :param name: Pretty name of the test name. For normal tests,
                     written with the avocado API, this should not be
                     set.  This is reserved for internal Avocado use,
                     such as when running random executables as tests.
        :type name: :class:`avocado.core.test.TestID`
        :param base_logdir: Directory where test logs should go. If None
                            provided a temporary directory will be created.
        :param config: the job configuration, usually set by command
                       line options and argument parsing
        :type config: dict
        """
        self.__phase = 'INIT'

        def record_and_warn(*args, **kwargs):
            """ Record call to this function and log warning """
            if not self.__log_warn_used:
                self.__log_warn_used = True
            return original_log_warn(*args, **kwargs)

        if name is not None:
            self.__name = name
        else:
            self.__name = TestID(0, self.__class__.__name__)

        self.__tags = tags

        self._config = config or settings.as_dict()

        self.__base_logdir = base_logdir
        self.__base_logdir_tmp = None
        if self.__base_logdir is None:
            prefix = 'avocado_test_'
            self.__base_logdir_tmp = tempfile.TemporaryDirectory(prefix=prefix)
            self.__base_logdir = self.__base_logdir_tmp.name

        self.__logfile = os.path.join(self.logdir, 'debug.log')

        self._stdout_file = os.path.join(self.logdir, 'stdout')
        self._stderr_file = os.path.join(self.logdir, 'stderr')
        self._output_file = os.path.join(self.logdir, 'output')
        self._logging_handlers = {}

        self.__outputdir = utils_path.init_dir(self.logdir, 'data')

        self.__log = LOG_JOB
        original_log_warn = self.log.warning
        self.__log_warn_used = False
        self.log.warn = self.log.warning = record_and_warn

        self.log.info('INIT %s', self.name)

        paths = ['/test/*']
        if params is None:
            params = []
        elif isinstance(params, tuple):
            params, paths = params[0], params[1]
        self.__params = parameters.AvocadoParams(params, paths,
                                                 self.__log.name)
        default_timeout = getattr(self, "timeout", None)
        self.timeout = self.params.get("timeout", default=default_timeout)

        self.__status = None
        self.__fail_reason = None
        self.__fail_class = None
        self.__traceback = None
        self.__skip_test = False

        # Are initialized lazily
        self.__cache_dirs = None
        self.__base_tmpdir = None
        self.__workdir = None

        self.__running = False
        self.paused = False
        self.paused_msg = ''

        self.__runner_queue = runner_queue

        self.log.debug("Test metadata:")
        if self.filename:
            self.log.debug("  filename: %s", self.filename)
        try:
            teststmpdir = self.teststmpdir
        except EnvironmentError:
            pass
        else:
            self.log.debug("  teststmpdir: %s", teststmpdir)

        unittest.TestCase.__init__(self, methodName=methodName)
        TestData.__init__(self)
Example #34
    def __init__(self,
                 methodName='runTest',
                 name=None,
                 params=None,
                 base_logdir=None,
                 tag=None,
                 job=None,
                 runner_queue=None):
        """
        Initializes the test.

        :param methodName: Name of the main method to run. For the sake of
                           compatibility with the original unittest class,
                           you should not set this.
        :param name: Pretty name of the test name. For normal tests, written
                     with the avocado API, this should not be set, this is
                     reserved for running random executables as tests.
        :param base_logdir: Directory where test logs should go. If None
                            provided, it'll use
                            :func:`avocado.core.data_dir.get_job_logs_dir`.
        :param tag: Tag that differentiates 2 executions of the same test name.
                    Example: 'long', 'short', so we can differentiate
                    'sleeptest.long' and 'sleeptest.short'.
        :param job: The job that this test is part of.
        """
        def record_and_warn(*args, **kwargs):
            """ Record call to this function and log warning """
            self.__log_warn_used = True
            return original_log_warn(*args, **kwargs)

        if name is not None:
            self.name = name
        else:
            self.name = self.__class__.__name__

        if params is None:
            params = {}
        self.params = Params(params)
        self._raw_params = params

        self.tag = tag or self.params.get('tag')
        self.job = job

        basename = os.path.basename(self.name)

        tmpdir = data_dir.get_tmp_dir()

        self.basedir = os.path.dirname(inspect.getfile(self.__class__))
        self.datadir = os.path.join(self.basedir, '%s.data' % basename)

        self.expected_stdout_file = os.path.join(self.datadir,
                                                 'stdout.expected')
        self.expected_stderr_file = os.path.join(self.datadir,
                                                 'stderr.expected')

        self.workdir = utils_path.init_dir(tmpdir, basename)
        self.srcdir = utils_path.init_dir(self.workdir, 'src')
        if base_logdir is None:
            base_logdir = data_dir.get_job_logs_dir()
        base_logdir = os.path.join(base_logdir, 'test-results')
        self.tagged_name = self.get_tagged_name(base_logdir)

        # Let's avoid trouble at logdir init time, since we're interested
        # in a relative directory here
        tagged_name = self.tagged_name
        if tagged_name.startswith('/'):
            tagged_name = tagged_name[1:]

        self.logdir = utils_path.init_dir(base_logdir, tagged_name)
        io.set_log_file_dir(self.logdir)
        self.logfile = os.path.join(self.logdir, 'debug.log')

        self.stdout_file = os.path.join(self.logdir, 'stdout')
        self.stderr_file = os.path.join(self.logdir, 'stderr')

        self.outputdir = utils_path.init_dir(self.logdir, 'data')
        self.sysinfodir = utils_path.init_dir(self.logdir, 'sysinfo')
        self.sysinfo_logger = sysinfo.SysInfo(basedir=self.sysinfodir)

        self.log = logging.getLogger("avocado.test")
        original_log_warn = self.log.warning
        self.__log_warn_used = False
        self.log.warn = self.log.warning = record_and_warn

        self.stdout_log = logging.getLogger("avocado.test.stdout")
        self.stderr_log = logging.getLogger("avocado.test.stderr")

        self.log.info('START %s', self.tagged_name)
        self.log.debug('')
        self.log.debug('Test instance parameters:')

        # Set the helper set_default to the params object
        setattr(self.params, 'set_default', self._set_default)

        # Apply what comes from the params dict
        for key in sorted(self.params.keys()):
            self.log.debug('    %s = %s', key, self.params.get(key))
        self.log.debug('')

        # Apply what comes from the default_params dict
        self.log.debug('Default parameters:')
        for key in sorted(self.default_params.keys()):
            self.log.debug('    %s = %s', key, self.default_params.get(key))
            self.params.set_default(key, self.default_params[key])
        self.log.debug('')
        self.log.debug(
            'Test instance params override defaults whenever available')
        self.log.debug('')

        # If there's a timeout set, log a timeout reminder
        if self.params.timeout:
            self.log.info(
                'Test timeout set. Will wait %.2f s for '
                'PID %s to end', float(self.params.timeout), os.getpid())
            self.log.info('')

        self.debugdir = None
        self.resultsdir = None
        self.status = None
        self.fail_reason = None
        self.fail_class = None
        self.traceback = None
        self.text_output = None

        self.whiteboard = ''

        self.running = False
        self.time_start = None
        self.time_end = None
        self.paused = False
        self.paused_msg = ''

        self.runner_queue = runner_queue

        self.time_elapsed = None
        unittest.TestCase.__init__(self)
Example #35
 def _init_sysinfo(self, job_logdir):
     if self.sysinfo is None:
         basedir = path.init_dir(job_logdir, 'sysinfo')
         self.sysinfo = sysinfo.SysInfo(basedir=basedir)
Example #36
class VirtBootstrap(CLICmd):
    """
    Implements the avocado 'virt-bootstrap' subcommand
    """

    name = 'virt-bootstrap'
    description = "Avocado-Virt 'virt-bootstrap' subcommand"

    def run(self, args):
        fail = False
        LOG.info('Probing your system for test requirements')
        try:
            utils_path.find_command('xz')
            logging.debug('xz present')
        except utils_path.CmdNotFoundError:
            LOG.warn("xz not installed. You may install xz (or the "
                     "equivalent on your distro) to fix the problem")
            fail = True

        jeos_sha1_url = ("https://avocado-project.org/data/assets/jeos/25/"
                         "SHA1SUM_JEOS25")
        try:
            LOG.debug('Verifying expected SHA1 sum from %s', jeos_sha1_url)
            sha1_file = urllib2.urlopen(jeos_sha1_url)
            sha1_contents = sha1_file.read()
            sha1 = sha1_contents.split(" ")[0]
            LOG.debug('Expected SHA1 sum: %s', sha1)
        except Exception as exc:
            LOG.error('Failed to get SHA1 from file: %s', exc)
            fail = True
            sha1 = "FAILED TO GET DOWNLOADED FROM AVOCADO-PROJECT"

        jeos_dst_dir = path.init_dir(
            os.path.join(data_dir.get_data_dir(), 'images'))
        jeos_dst_path = os.path.join(jeos_dst_dir, 'jeos-25-64.qcow2.xz')

        if os.path.isfile(jeos_dst_path):
            actual_sha1 = crypto.hash_file(filename=jeos_dst_path,
                                           algorithm="sha1")
        else:
            actual_sha1 = 'FILE DOES NOT EXIST LOCALLY'

        if actual_sha1 != sha1:
            if actual_sha1 == 'FILE DOES NOT EXIST LOCALLY':
                LOG.debug(
                    'JeOS could not be found at %s. Downloading '
                    'it (205 MB). Please wait...', jeos_dst_path)
            else:
                LOG.debug(
                    'JeOS at %s is either corrupted or outdated. '
                    'Downloading a new copy (205 MB). '
                    'Please wait...', jeos_dst_path)
            jeos_url = ("https://avocado-project.org/data/assets/jeos/25/"
                        "jeos-25-64.qcow2.xz")
            try:
                download.url_download(jeos_url, jeos_dst_path)
            except:
                LOG.warn('Exiting upon user request (Download not finished)')
        else:
            LOG.debug('Compressed JeOS image found in %s, with proper SHA1',
                      jeos_dst_path)

        LOG.debug('Uncompressing the JeOS image to restore pristine '
                  'state. Please wait...')
        os.chdir(os.path.dirname(jeos_dst_path))
        cmd = 'xz --keep --force -d %s' % os.path.basename(jeos_dst_path)
        result = process.run(cmd, ignore_status=True)
        if result.exit_status != 0:
            LOG.error('Error uncompressing the image (see details below):\n%s',
                      result)
            fail = True
        else:
            LOG.debug('Successfully uncompressed the image')

        if fail:
            LOG.warn('Problems found probing this system for tests '
                     'requirements. Please check the error messages '
                     'and fix the problems found')
        else:
            LOG.info('Your system appears to be all set to execute tests')
Example #37
 def _start_sysinfo(self):
     if hasattr(self.args, 'sysinfo'):
         if self.args.sysinfo == 'on':
             sysinfo_dir = path.init_dir(self.logdir, 'sysinfo')
             self.sysinfo = sysinfo.SysInfo(basedir=sysinfo_dir)
Example #38
    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, tag=None, job=None, runner_queue=None):
        """
        Initializes the test.

        :param methodName: Name of the main method to run. For the sake of
                           compatibility with the original unittest class,
                           you should not set this.
        :param name: Pretty name of the test name. For normal tests, written
                     with the avocado API, this should not be set, this is
                     reserved for running random executables as tests.
        :param base_logdir: Directory where test logs should go. If None
                            provided, it'll use
                            :func:`avocado.core.data_dir.get_job_logs_dir`.
        :param tag: Tag that differentiates 2 executions of the same test name.
                    Example: 'long', 'short', so we can differentiate
                    'sleeptest.long' and 'sleeptest.short'.
        :param job: The job that this test is part of.
        """
        if name is not None:
            self.name = name
        else:
            self.name = self.__class__.__name__

        if params is None:
            params = {}
        self.params = Params(params)
        self._raw_params = params

        self.tag = tag or self.params.get('tag')
        self.job = job

        basename = os.path.basename(self.name)

        if job is not None:
            tmpdir = tempfile.mkdtemp(dir=data_dir.get_tmp_dir(),
                                      prefix='job-%s-' % job.unique_id)
        else:
            tmpdir = tempfile.mkdtemp(dir=data_dir.get_tmp_dir())

        self.basedir = os.path.dirname(inspect.getfile(self.__class__))
        self.datadir = os.path.join(self.basedir, '%s.data' % basename)

        self.expected_stdout_file = os.path.join(self.datadir,
                                                 'stdout.expected')
        self.expected_stderr_file = os.path.join(self.datadir,
                                                 'stderr.expected')

        self.workdir = path.init_dir(tmpdir, basename)
        self.srcdir = path.init_dir(self.workdir, 'src')
        if base_logdir is None:
            base_logdir = data_dir.get_job_logs_dir()
        base_logdir = os.path.join(base_logdir, 'test-results')
        self.tagged_name = self.get_tagged_name(base_logdir)

        self.logdir = path.init_dir(base_logdir, self.tagged_name)
        io.set_log_file_dir(self.logdir)
        self.logfile = os.path.join(self.logdir, 'debug.log')

        self.stdout_file = os.path.join(self.logdir, 'stdout')
        self.stderr_file = os.path.join(self.logdir, 'stderr')

        self.outputdir = path.init_dir(self.logdir, 'data')
        self.sysinfodir = path.init_dir(self.logdir, 'sysinfo')
        self.sysinfo_logger = sysinfo.SysInfo(basedir=self.sysinfodir)

        self.log = logging.getLogger("avocado.test")

        self.stdout_log = logging.getLogger("avocado.test.stdout")
        self.stderr_log = logging.getLogger("avocado.test.stderr")

        self.log.info('START %s', self.tagged_name)
        self.log.debug('')
        self.log.debug('Test instance parameters:')

        # Set the helper set_default to the params object
        setattr(self.params, 'set_default', self._set_default)

        # Apply what comes from the params dict
        for key in sorted(self.params.keys()):
            self.log.debug('    %s = %s', key, self.params.get(key))
        self.log.debug('')

        # Apply what comes from the default_params dict
        self.log.debug('Default parameters:')
        for key in sorted(self.default_params.keys()):
            self.log.debug('    %s = %s', key, self.default_params.get(key))
            self.params.set_default(key, self.default_params[key])
        self.log.debug('')
        self.log.debug('Test instance params override defaults whenever available')
        self.log.debug('')

        # If there's a timeout set, log a timeout reminder
        if self.params.timeout:
            self.log.info('Test timeout set. Will wait %.2f s for '
                          'PID %s to end',
                          float(self.params.timeout), os.getpid())
            self.log.info('')

        self.debugdir = None
        self.resultsdir = None
        self.status = None
        self.fail_reason = None
        self.fail_class = None
        self.traceback = None
        self.text_output = None

        self.whiteboard = ''

        self.running = False
        self.time_start = None
        self.time_end = None
        self.paused = False
        self.paused_msg = ''

        self.runner_queue = runner_queue

        self.time_elapsed = None
        unittest.TestCase.__init__(self)
Example #39
 def record_reference_stderr(self):
     utils_path.init_dir(self.datadir)
     shutil.copyfile(self.stderr_file, self.expected_stderr_file)
Example #40
    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, tag=None, job=None, runner_queue=None):
        """
        Initializes the test.

        :param methodName: Name of the main method to run. For the sake of
                           compatibility with the original unittest class,
                           you should not set this.
        :param name: Pretty name of the test name. For normal tests, written
                     with the avocado API, this should not be set, this is
                     reserved for running random executables as tests.
        :param base_logdir: Directory where test logs should go. If None
                            provided, it'll use
                            :func:`avocado.core.data_dir.create_job_logs_dir`.
        :param tag: Tag that differentiates 2 executions of the same test name.
                    Example: 'long', 'short', so we can differentiate
                    'sleeptest.long' and 'sleeptest.short'.
        :param job: The job that this test is part of.
        """
        def record_and_warn(*args, **kwargs):
            """ Record call to this function and log warning """
            if not self.__log_warn_used:
                self.__log_warn_used = True
            return original_log_warn(*args, **kwargs)

        if name is not None:
            self.name = name
        else:
            self.name = self.__class__.__name__

        self.tag = tag or None

        self.job = job

        basename = os.path.basename(self.name)

        tmpdir = data_dir.get_tmp_dir()

        self.filename = inspect.getfile(self.__class__).rstrip('co')
        self.basedir = os.path.dirname(self.filename)
        self.datadir = self.filename + '.data'

        self.expected_stdout_file = os.path.join(self.datadir,
                                                 'stdout.expected')
        self.expected_stderr_file = os.path.join(self.datadir,
                                                 'stderr.expected')

        self.workdir = utils_path.init_dir(tmpdir, basename)
        self.srcdir = utils_path.init_dir(self.workdir, 'src')
        if base_logdir is None:
            base_logdir = data_dir.create_job_logs_dir()
        base_logdir = os.path.join(base_logdir, 'test-results')
        self.tagged_name = self.get_tagged_name(base_logdir)

        # Let's avoid trouble at logdir init time, since we're interested
        # in a relative directory here
        tagged_name = self.tagged_name
        if tagged_name.startswith('/'):
            tagged_name = tagged_name[1:]

        self.logdir = utils_path.init_dir(base_logdir, tagged_name)
        genio.set_log_file_dir(self.logdir)
        self.logfile = os.path.join(self.logdir, 'debug.log')

        self.stdout_file = os.path.join(self.logdir, 'stdout')
        self.stderr_file = os.path.join(self.logdir, 'stderr')

        self.outputdir = utils_path.init_dir(self.logdir, 'data')
        self.sysinfodir = utils_path.init_dir(self.logdir, 'sysinfo')
        self.sysinfo_logger = sysinfo.SysInfo(basedir=self.sysinfodir)

        self.log = logging.getLogger("avocado.test")
        original_log_warn = self.log.warning
        self.__log_warn_used = False
        self.log.warn = self.log.warning = record_and_warn

        self.stdout_log = logging.getLogger("avocado.test.stdout")
        self.stderr_log = logging.getLogger("avocado.test.stderr")

        mux_entry = ['/test/*']
        if isinstance(params, dict):
            self.default_params = self.default_params.copy()
            self.default_params.update(params)
            params = []
        elif params is None:
            params = []
        elif isinstance(params, tuple):
            params, mux_entry = params[0], params[1]
        self.params = multiplexer.AvocadoParams(params, self.name, self.tag,
                                                mux_entry,
                                                self.default_params)

        self.log.info('START %s', self.tagged_name)
        self.log.debug('')

        self.debugdir = None
        self.resultsdir = None
        self.status = None
        self.fail_reason = None
        self.fail_class = None
        self.traceback = None
        self.text_output = None

        self.whiteboard = ''

        self.running = False
        self.time_start = None
        self.time_end = None
        self.paused = False
        self.paused_msg = ''

        self.runner_queue = runner_queue

        self.time_elapsed = None
        unittest.TestCase.__init__(self, methodName=methodName)
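
A note on the init_dir calls in this constructor: workdir, srcdir, logdir, outputdir and sysinfodir are all created through utils_path.init_dir, which is expected to create the directory (and any missing parents) if needed and return its path. The stand-in below is a minimal sketch of that contract under those assumptions, not the real avocado.utils.path implementation, and the tmpdir and test name it uses are made up for illustration.

import os


def init_dir_sketch(*args):
    # Join the arguments into one path, create it (with parents) if it is
    # missing, and return the path so calls can be chained.
    directory = os.path.join(*args)
    if not os.path.isdir(directory):
        os.makedirs(directory)
    return directory


# Mirrors the layout built in the constructor above: <tmpdir>/<test name>/src
workdir = init_dir_sketch('/tmp/avocado-sketch', 'sleeptest.py')
srcdir = init_dir_sketch(workdir, 'src')
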
Example #41
0
    def record_reference_stderr(self):
        """Copy the test's produced stderr into the expected-output reference file."""
        utils_path.init_dir(self.datadir)
        shutil.copyfile(self.stderr_file, self.expected_stderr_file)
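
Example #41 records the stderr a test actually produced as the new expected reference. The sketch below illustrates the intended round trip, recording a baseline once and comparing against it on later runs; the helper names and the filecmp-based comparison are assumptions for illustration, not avocado's API.

import filecmp
import os
import shutil


def record_reference(actual_file, expected_file):
    # Create the data directory next to the test (if missing) and copy the
    # produced output there as the new expected baseline.
    datadir = os.path.dirname(expected_file)
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    shutil.copyfile(actual_file, expected_file)


def matches_reference(actual_file, expected_file):
    # A later run passes the output check only if its output is
    # byte-identical to the recorded baseline.
    return filecmp.cmp(actual_file, expected_file, shallow=False)
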
Example #42
0
class VirtBootstrap(plugin.Plugin):
    """
    Implements the avocado 'virt-bootstrap' subcommand
    """

    name = 'virt_bootstrap'
    enabled = True

    def configure(self, parser):
        self.parser = parser.subcommands.add_parser(
            'virt-bootstrap',
            help='Download image files important to avocado virt tests')
        super(VirtBootstrap, self).configure(self.parser)

    def run(self, args):
        fail = False
        view = output.View(app_args=args)
        view.notify(event='message',
                    msg='Probing your system for test requirements')
        try:
            utils_path.find_command('7za')
            view.notify(event='minor', msg='7zip present')
        except utils_path.CmdNotFoundError:
            view.notify(event='warning',
                        msg=("7za not installed. You may "
                             "install 'p7zip' (or the "
                             "equivalent on your distro) to "
                             "fix the problem"))
            fail = True

        jeos_sha1_url = 'https://lmr.fedorapeople.org/jeos/SHA1SUM_JEOS20'
        try:
            view.notify(event='minor',
                        msg=('Verifying expected SHA1 '
                             'sum from %s' % jeos_sha1_url))
            sha1_file = urllib2.urlopen(jeos_sha1_url)
            sha1_contents = sha1_file.read()
            sha1 = sha1_contents.split(" ")[0]
            view.notify(event='minor', msg='Expected SHA1 sum: %s' % sha1)
        except Exception as e:
            view.notify(event='error',
                        msg='Failed to get SHA1 from file: %s' % e)
            sha1 = None  # avoid a NameError in the comparison below
            fail = True

        jeos_dst_dir = path.init_dir(
            os.path.join(data_dir.get_data_dir(), 'images'))
        jeos_dst_path = os.path.join(jeos_dst_dir, 'jeos-20-64.qcow2.7z')

        if os.path.isfile(jeos_dst_path):
            actual_sha1 = crypto.hash_file(filename=jeos_dst_path,
                                           algorithm="sha1")
        else:
            actual_sha1 = '0'

        if actual_sha1 != sha1:
            if actual_sha1 == '0':
                view.notify(event='minor',
                            msg=('JeOS could not be found at %s. Downloading '
                                 'it (173 MB). Please wait...' %
                                 jeos_dst_path))
            else:
                view.notify(event='minor',
                            msg=('JeOS at %s is either corrupted or outdated. '
                                 'Downloading a new copy (173 MB). '
                                 'Please wait...' % jeos_dst_path))
            jeos_url = 'https://lmr.fedorapeople.org/jeos/jeos-20-64.qcow2.7z'
            try:
                download.url_download(jeos_url, jeos_dst_path)
            except:
                view.notify(event='warning',
                            msg=('Exiting upon user request (Download '
                                 'not finished)'))
        else:
            view.notify(event='minor',
                        msg=('Compressed JeOS image found '
                             'in %s, with proper SHA1' % jeos_dst_path))

        view.notify(event='minor',
                    msg=('Uncompressing the JeOS image to restore pristine '
                         'state. Please wait...'))
        os.chdir(os.path.dirname(jeos_dst_path))
        result = process.run('7za -y e %s' % os.path.basename(jeos_dst_path),
                             ignore_status=True)
        if result.exit_status != 0:
            view.notify(event='error',
                        msg=('Error uncompressing the image '
                             '(see details below):\n%s' % result))
            fail = True
        else:
            view.notify(event='minor',
                        msg='Successfully uncompressed the image')

        if fail:
            view.notify(event='warning',
                        msg=('Problems found probing this system for tests '
                             'requirements. Please check the error messages '
                             'and fix the problems found'))
        else:
            view.notify(event='message',
                        msg=('Your system appears to be all '
                             'set to execute tests'))
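
The re-download decision in this plugin rests on comparing the SHA1 of the local image (computed with avocado's crypto.hash_file) against the published sum. For reference, a plain hashlib equivalent of that digest computation is sketched below; it is illustrative only and not the plugin's actual code.

import hashlib


def sha1_of_file(path, chunk_size=65536):
    # Stream the file in chunks so a large image never has to fit in memory.
    digest = hashlib.sha1()
    with open(path, 'rb') as image:
        for chunk in iter(lambda: image.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()


# The plugin only re-downloads when the digests differ, roughly:
#     if sha1_of_file(jeos_dst_path) != sha1:
#         download.url_download(jeos_url, jeos_dst_path)
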
Example #43
0
    def __init__(self,
                 methodName='runTest',
                 name=None,
                 params=None,
                 base_logdir=None,
                 tag=None,
                 job=None,
                 runner_queue=None):
        """
        Initializes the test.

        :param methodName: Name of the main method to run. For the sake of
                           compatibility with the original unittest class,
                           you should not set this.
        :param name: Pretty name of the test. For normal tests, written
                     with the avocado API, this should not be set; it is
                     reserved for running random executables as tests.
        :param params: Test parameters: a dict merged over the default
                       parameters, a (params, mux_entry) tuple, or None.
        :param base_logdir: Directory where test logs should go. If not
                            provided,
                            :func:`avocado.core.data_dir.create_job_logs_dir`
                            is used.
        :param tag: Tag that differentiates two executions of the same test
                    name. Example: 'long', 'short', so we can differentiate
                    'sleeptest.long' and 'sleeptest.short'.
        :param job: The job that this test is part of.
        :param runner_queue: Queue used by the test process to report status
                             back to the test runner.
        """
        def record_and_warn(*args, **kwargs):
            """ Record call to this function and log warning """
            if not self.__log_warn_used:
                self.__log_warn_used = True
            return original_log_warn(*args, **kwargs)

        if name is not None:
            self.name = name
        else:
            self.name = self.__class__.__name__

        self.tag = tag or None

        self.job = job

        basename = os.path.basename(self.name)

        tmpdir = data_dir.get_tmp_dir()

        self.filename = inspect.getfile(self.__class__).rstrip('co')
        self.basedir = os.path.dirname(self.filename)
        self.datadir = self.filename + '.data'

        self.expected_stdout_file = os.path.join(self.datadir,
                                                 'stdout.expected')
        self.expected_stderr_file = os.path.join(self.datadir,
                                                 'stderr.expected')

        self.workdir = utils_path.init_dir(tmpdir, basename)
        self.srcdir = utils_path.init_dir(self.workdir, 'src')
        if base_logdir is None:
            base_logdir = data_dir.create_job_logs_dir()
        base_logdir = os.path.join(base_logdir, 'test-results')
        self.tagged_name = self.get_tagged_name(base_logdir)

        # Let's avoid trouble at logdir init time, since we're interested
        # in a relative directory here
        tagged_name = self.tagged_name
        if tagged_name.startswith('/'):
            tagged_name = tagged_name[1:]

        self.logdir = utils_path.init_dir(base_logdir, tagged_name)
        genio.set_log_file_dir(self.logdir)
        self.logfile = os.path.join(self.logdir, 'debug.log')

        self.stdout_file = os.path.join(self.logdir, 'stdout')
        self.stderr_file = os.path.join(self.logdir, 'stderr')

        self.outputdir = utils_path.init_dir(self.logdir, 'data')
        self.sysinfodir = utils_path.init_dir(self.logdir, 'sysinfo')
        self.sysinfo_logger = sysinfo.SysInfo(basedir=self.sysinfodir)

        self.log = logging.getLogger("avocado.test")
        original_log_warn = self.log.warning
        self.__log_warn_used = False
        self.log.warn = self.log.warning = record_and_warn

        self.stdout_log = logging.getLogger("avocado.test.stdout")
        self.stderr_log = logging.getLogger("avocado.test.stderr")

        mux_entry = ['/test/*']
        if isinstance(params, dict):
            self.default_params = self.default_params.copy()
            self.default_params.update(params)
            params = []
        elif params is None:
            params = []
        elif isinstance(params, tuple):
            params, mux_entry = params[0], params[1]
        self.params = multiplexer.AvocadoParams(params, self.name, self.tag,
                                                mux_entry, self.default_params)

        self.log.info('START %s', self.tagged_name)
        self.log.debug('')

        self.debugdir = None
        self.resultsdir = None
        self.status = None
        self.fail_reason = None
        self.fail_class = None
        self.traceback = None
        self.text_output = None

        self.whiteboard = ''

        self.running = False
        self.time_start = None
        self.time_end = None
        self.paused = False
        self.paused_msg = ''

        self.runner_queue = runner_queue

        self.time_elapsed = None
        unittest.TestCase.__init__(self, methodName=methodName)
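
Both constructors end by normalising the params argument into a variant list plus a multiplexer entry point before building AvocadoParams. The function below restates those branches side by side with assumed names; it is purely illustrative and not part of avocado.

def normalise_params(params, default_params, mux_entry=('/test/*',)):
    # Illustrative restatement of the branches in __init__ above.
    mux_entry = list(mux_entry)
    defaults = dict(default_params)
    if isinstance(params, dict):
        # A dict is merged over the defaults and yields an empty variant list.
        defaults.update(params)
        params = []
    elif params is None:
        params = []
    elif isinstance(params, tuple):
        # A tuple carries both the variant and the multiplexer entry point.
        params, mux_entry = params[0], params[1]
    return params, mux_entry, defaults
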