Example #1
def _zephyr_testcase_patcher_for_components(testcase):
    for tag in testcase._tags:
        if tag.startswith('component/'):
            # there are already assigned components
            testcase.log.debug("component info found, no need to guess")
            return

    # Is this a Zephyr related testcase? Does it declare any target
    # that will run Zephyr?
    # yeah, wouldn't cover all the bases, but then properly tag the
    # components in your testcase, this is just a guesser for backward
    # compat
    for _target_name, target_data in testcase._targets.iteritems():
        kws = target_data.get('kws', {})
        if 'app_zephyr' in kws:
            # yay, at least one Zephyr target, we take it
            break
    else:
        # of all the targets in this testcase (if any), none declares
        # an App Zephyr, so we can't be sure this is a Zephyr-related
        # testcase
        return

    # There are no assigned components, so let's guess based on paths
    ZEPHYR_BASE = os.environ.get('ZEPHYR_BASE', None)
    ZEPHYR_TESTS_BASE = os.environ.get('ZEPHYR_TESTS_BASE', None)

    roots = []
    if ZEPHYR_BASE:
        roots += [
            os.path.join(ZEPHYR_BASE, "tests"),
            os.path.join(ZEPHYR_BASE, "samples"),
        ]
    if ZEPHYR_TESTS_BASE:
        roots += [
            os.path.join(ZEPHYR_TESTS_BASE, "tests"),
            os.path.join(ZEPHYR_TESTS_BASE, "testlib"),
        ]

    origin = commonl.origin_get()
    filename = testcase.kws.get('thisfile', None)
    if filename == None:
        testcase.log.debug("component info n/a, can't find source file")
        return
    filename = os.path.normpath(os.path.realpath(filename))
    componentl = _zephyr_components_from_path(os.path.abspath(filename), roots)
    for component in componentl:
        component_tag = "component/" + component
        testcase.tag_set(component_tag, component, origin)

    components = " ".join(componentl)
    testcase.log.info("component info guessed: %s", components)
Example #2
def _zephyr_testcase_patcher_for_components(testcase):
    for tag in testcase._tags:
        if tag.startswith('component/'):
            # there are already assigned components
            testcase.log.debug("component info found, no need to guess")
            return

    # Is this a Zephyr related testcase? Does it declare any target
    # that will run Zephyr?
    # yeah, wouldn't cover all the bases, but then properly tag the
    # components in your testcase, this is just a guesser for backward
    # compat
    for _target_name, target_data in testcase._targets.items():
        kws = target_data.get('kws', {})
        if 'app_zephyr' in kws:
            # yay, at least one Zephyr target, we take it
            break
    else:
        # of all the targets in this testcase (if any), none declares
        # an App Zephyr, so we can't be sure this is a Zephyr-related
        # testcase
        return

    # There are no assigned components, so let's guess based on paths
    ZEPHYR_BASE = os.environ.get('ZEPHYR_BASE', None)
    ZEPHYR_TESTS_BASE = os.environ.get('ZEPHYR_TESTS_BASE', None)

    roots = []
    if ZEPHYR_BASE:
        roots += [
            os.path.join(ZEPHYR_BASE, "tests"),
            os.path.join(ZEPHYR_BASE, "samples"),
        ]
    if ZEPHYR_TESTS_BASE:
        roots += [
            os.path.join(ZEPHYR_TESTS_BASE, "tests"),
            os.path.join(ZEPHYR_TESTS_BASE, "testlib"),
        ]

    origin = commonl.origin_get()
    filename = testcase.kws.get('thisfile', None)
    if filename == None:
        testcase.log.debug("component info n/a, can't find source file")
        return
    filename = os.path.normpath(os.path.realpath(filename))
    componentl = _zephyr_components_from_path(os.path.abspath(filename), roots)
    for component in componentl:
        component_tag = "component/" + component
        testcase.tag_set(component_tag, component, origin)

    components = " ".join(componentl)
    testcase.log.info("component info guessed: %s", components)
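
The helper _zephyr_components_from_path() used above is not shown in these examples. Below is a minimal sketch of such a path-to-component mapper, assuming components are simply the directory segments under one of the known roots; this is an illustration only, not the actual TCF implementation:

import os

def _zephyr_components_from_path_sketch(filename, roots):
    # hypothetical helper: map a testcase path under a known root,
    # e.g. ROOT/tests/kernel/sched/..., to component names such as
    # "kernel" and "kernel/sched"
    for root in roots:
        root = os.path.normpath(root)
        if filename.startswith(root + os.sep):
            relative = os.path.relpath(os.path.dirname(filename), root)
            if relative == ".":
                return []
            parts = relative.split(os.sep)
            return [os.sep.join(parts[:i + 1]) for i in range(len(parts))]
    return []
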
Example #3
    def check_filter(self, _objdir, arch, board, _filter, origin=None):
        """
        This is going to be called by the App Builder's build function
        to evaluate if we need to filter out a build of a testcase. In
        any other case, it will be ignored.

        :param str objdir: name of the Zephyr's object directory
          where to find configuration files
        :param str arch: name of the architecture we are building
        :param str board: Zephyr's board we are building
        :param str _filter: Zephyr's Sanity Check style filter
          expression
        :param str origin: where does this come from?
        """
        if not origin:
            origin = commonl.origin_get()

        if _filter == None or _filter == "":
            return

        self.target.report_info("filter: processing '%s' @%s" %
                                (_filter, origin),
                                dlevel=1)
        self.target.report_info("filter: reading defconfig", dlevel=2)

        _defconfig = self.config_file_read()
        defconfig = {}
        for key, value in _defconfig.iteritems():
            # The testcase.ini filter language doesn't prefix the
            # CONFIG_ stuff, so we are going to strip it with [7:]
            if key.startswith("CONFIG_"):
                defconfig[key[7:]] = value
            # The testcase.yaml filter language prefixes with
            # CONFIG_ stuff, so we don't strip it
            defconfig[key] = value
        self.target.report_info("filter: evaluating", dlevel=2)
        try:
            res = commonl.expr_parser.parse(_filter, defconfig)
            self.target.report_info("filter: evaluated defconfig: %s" % res,
                                    dlevel=1)
            if res == False:
                raise tc.skip_e("filter '%s' @ %s causes TC to be skipped" %
                                (_filter, origin))
            else:
                self.target.report_info(
                    "filter '%s' @ %s causes TC to be continued" %
                    (_filter, origin),
                    dlevel=2)
        except SyntaxError as se:
            raise tc.error_e("filter: failed processing '%s' @ %s: %s" %
                             (_filter, origin, se))
Example #4
    def check_filter(self, _objdir, arch, board, _filter, origin = None):
        """
        This is going to be called by the App Builder's build function
        to evaluate if we need to filter out a build of a testcase. In
        any other case, it will be ignored.

        :param str objdir: name of the Zephyr's object directory
          where to find configuration files
        :param str arch: name of the architecture we are building
        :param str board: Zephyr's board we are building
        :param str _filter: Zephyr's Sanity Check style filter
          expression
        :param str origin: where does this come from?
        """
        if not origin:
            origin = commonl.origin_get()

        if _filter == None or _filter == "":
            return

        self.target.report_info("filter: processing '%s' @%s"
                                % (_filter, origin), dlevel = 1)
        self.target.report_info("filter: reading defconfig", dlevel = 2)

        _defconfig = self.config_file_read()
        defconfig = {}
        for key, value in _defconfig.iteritems():
            # The testcase.ini filter language doesn't prefix the
            # CONFIG_ stuff, so we are going to strip it with [7:]
            if key.startswith("CONFIG_"):
                defconfig[key[7:]] = value
            # The testcase.yaml filter language prefixes with
            # CONFIG_ stuff, so we don't strip it
            defconfig[key] = value
        self.target.report_info("filter: evaluating", dlevel = 2)
        try:
            res = commonl.expr_parser.parse(_filter, defconfig)
            self.target.report_info("filter: evaluated defconfig: %s" % res,
                                    dlevel = 1)
            if res == False:
                raise tc.skip_e("filter '%s' @ %s causes TC to be skipped"
                                % (_filter, origin))
            else:
                self.target.report_info(
                    "filter '%s' @ %s causes TC to be continued"
                    % (_filter, origin), dlevel = 2)
        except SyntaxError as se:
            raise tc.error_e("filter: failed processing '%s' @ %s: %s"
                             % (_filter, origin, se))
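
A minimal evaluation sketch of the filter language check_filter() relies on, reusing the commonl.expr_parser.parse() call shown above; the config symbols and values are illustrative:

# defconfig built as check_filter() builds it: each option is stored
# both stripped and CONFIG_-prefixed, so testcase.ini style and
# testcase.yaml style filter expressions can both resolve it
defconfig = {"ARM": "y", "CONFIG_ARM": "y", "SMP": "y", "CONFIG_SMP": "y"}
res = commonl.expr_parser.parse("CONFIG_ARM and CONFIG_SMP", defconfig)
# when res evaluates to False, check_filter() raises tc.skip_e() to
# skip building the testcase for this architecture/board
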
Example #5
File: app.py Project: alan-wr/tcf-1
def driver_add(cls, name=None):
    """
    Add a new driver for app building

    Note the driver will be registered under the class name; it is
    recommended to name them *app_something*.
    """
    assert issubclass(cls, app_c)
    if name == None:
        name = cls.__name__
    else:
        assert isinstance(name, basestring)
    if name in _drivers:
        raise ValueError('%s: already registered by @%s' %
                         (name, _drivers[name][1]))
    _drivers[name] = (cls, commonl.origin_get(2))
Example #6
File: app.py Project: intel/tcf
def driver_add(cls, name = None):
    """
    Add a new driver for app building

    Note the driver will be registered under the class name; it is
    recommended to name them *app_something*.
    """
    assert issubclass(cls, app_c)
    if name == None:
        name = cls.__name__
    else:
        assert isinstance(name, basestring)
    if name in _drivers:
        raise ValueError('%s: already registered by @%s'
                         % (name, _drivers[name][1]))
    _drivers[name] = (cls, commonl.origin_get(2))
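
A minimal registration sketch, assuming app_c is the application driver base class checked by the assertion above; the driver body is a placeholder:

class app_example(app_c):
    # hypothetical driver; only the registration mechanics are shown
    pass

driver_add(app_example)                       # registered as "app_example"
driver_add(app_example, name="app_example2")  # or under an explicit name
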
Example #7
def url_add(url, ssl_ignore=False, aka=None, ca_path=None, origin=None):
    """
    Add a TTBD server

    :param str url: server's URL (``http://SERVER:PORT``); it can be
      *https*; port is most commonly *5000*.
    :param bool ssl_ignore: if True, skips verifying SSL certificates
    :param str aka: Short form for this server, to use in display messages
    """
    # FIXME: move this to the object construction
    u = urllib.parse.urlparse(url)
    if u.scheme == "" or u.netloc == "":
        raise Exception("%s: malformed URL?" % url)
    if not origin:
        origin = "configured " + commonl.origin_get(2)
    logger.info("%s: Added server URL %s", origin, url)
    urls.append((url, ssl_ignore, aka, ca_path))  # COMPAT
    urls_data[url]['origin'] = origin
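
A minimal configuration sketch of how url_add() would typically be invoked from a client configuration file; the host names and ports are placeholders:

# e.g. in a conf_*.py client configuration file
url_add("https://ttbd.example.com:5000", aka="example")
# deployments with self-signed certificates may need ssl_ignore
url_add("https://10.0.0.2:5000", ssl_ignore=True, origin="lab config")
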
Example #8
    def config_file_write(self, name, data, bsp = None):
        """\
        Write an extra config file called *NAME*.conf in the Zephyr's
        App build directory.

        Note this takes care to only write it if the data is new or
        the file does not exist, to avoid unnecessary rebuilds.

        :param str name: Name for the configuration file; this has to
          be a valid filename; *.conf* will be added by the function.

        :param str data: Data to include in the configuration file;
          this is (currently) valid kconfig data, which are lines of
          text with # acting as comment character; for example::

            CONFIG_UART_CONSOLE_ON_DEV_NAME="UART_1"

        :param str bsp: (optional) BSP on which to operate; when the
          target is configured for a :term:`BSP model` which contains
          multiple Zephyr BSPs, you will need to specify which one to
          modify.

          This parameter can be omitted if only one BSP is available
          in the current BSP Model.

        *Example*

        >>> if something:
        >>>     target.zephyr.config_file_write("mytweaks",
        >>>                                     'CONFIG_SOMEVAR=1\\n'
        >>>                                     'CONFIG_ANOTHER="VALUE"\\n')
        """
        target = self.target
        bsp = self._bsp_select(target, bsp)

        # Ensure the config directory is there
        outdir = target._kws_bsp[bsp]['zephyr_objdir']
        try:
            os.makedirs(outdir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise RuntimeError("%s: Cannot create outdir directory: %s"
                                   % (outdir, str(e)))

        # Now create a .new file
        if not name.endswith(".conf"):
            name += ".conf"
        existing_filename = os.path.join(outdir, name)
        new_filename = existing_filename + ".new"
        with codecs.open(new_filename, "w+", encoding = 'utf-8',
                         errors = 'ignore') as f:
            f.write("""\
# Config file automatically generated by TCF's:
#
#   %s
#
# because of instructions from
#
#   %s
#
# Do not edit by hand

""" % (commonl.origin_get(), commonl.origin_get(2)))
            f.write(data)
            # report the config file we wrote, so reproduction
            # instructions will carry it in the report file; we report
            # the data without the header to make it easier -- note we
            # use report_pass() [vs report_info()] as _info might get
            # filtered as too verbose info, whereas pass is
            # information important for passing.
            target.report_pass("enabled Zephyr config file %s at %s"
                               % (name, outdir),
                               { 'config file': data })
        if not os.path.exists(existing_filename):
            shutil.move(new_filename, existing_filename)
        else:
            # Check if there are changes before updating it, to avoid
            # unnecessary rebuilds
            _new_hash = hashlib.sha256()
            new_hash = commonl.hash_file(_new_hash, new_filename)
            _old_hash = hashlib.sha256()
            old_hash = commonl.hash_file(_old_hash, existing_filename)
            if new_hash.digest() != old_hash.digest():
                shutil.move(new_filename, existing_filename)
            else:
                os.unlink(new_filename)
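
The tail of the method is a reusable write-only-if-changed pattern; here is a condensed, generic sketch of the same idea (write a side file, compare hashes, replace only when the content differs):

import hashlib
import os
import shutil

def write_if_changed(path, data):
    # sketch of the pattern above, used to avoid needless rebuilds
    new_path = path + ".new"
    with open(new_path, "w") as f:
        f.write(data)
    def _digest(fn):
        with open(fn, "rb") as f:
            return hashlib.sha256(f.read()).digest()
    if not os.path.exists(path) or _digest(new_path) != _digest(path):
        shutil.move(new_path, path)    # new or different: replace
    else:
        os.unlink(new_path)            # identical: drop the side file
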
Example #9
    def _run(self,
             cmd=None,
             expect=None,
             prompt_regex=None,
             output=False,
             output_filter_crlf=True,
             trim=False,
             console=None,
             origin=None):
        if cmd:
            assert isinstance(cmd, basestring)
        assert expect == None \
            or isinstance(expect, basestring) \
            or isinstance(expect, re._pattern_type) \
            or isinstance(expect, list)
        assert prompt_regex == None \
            or isinstance(prompt_regex, basestring) \
            or isinstance(prompt_regex, re._pattern_type)

        if origin == None:
            origin = commonl.origin_get(3)
        else:
            assert isinstance(origin, basestring)

        target = self.target

        if output:
            offset = self.target.console.size(console=console)

        if cmd:
            self.target.send(cmd, console=console)
        if expect:
            if isinstance(expect, list):
                for expectation in expect:
                    assert isinstance(expectation, basestring) \
                        or isinstance(expectation, re._pattern_type)
                    target.expect(expectation,
                                  name="command output",
                                  console=console,
                                  origin=origin)
            else:
                target.expect(expect,
                              name="command output",
                              console=console,
                              origin=origin)
        if prompt_regex == None:
            self.target.expect(self.shell_prompt_regex,
                               name="shell prompt",
                               console=console,
                               origin=origin)
        else:
            self.target.expect(prompt_regex,
                               name="shell prompt",
                               console=console,
                               origin=origin)
        if output:
            output = self.target.console.read(offset=offset, console=console)
            if output_filter_crlf:
                # replace \r\n, \r\r\n, \r\r\r\r\n... it happens
                output = re.sub(self.crnl_regex, self.target.crlf, output)
            if trim:
                # When we call run(), it usually prints in the console:
                ## <command-echo from our typing>
                ## <command output>
                ## <prompt>
                #
                # So to trim we just remove the first and last
                # lines--won't work well without output_filter_crlf
                # and it is quite a hack.
                first_nl = output.find(self.target.crlf)
                last_nl = output.rfind(self.target.crlf)
                output = output[first_nl + 1:last_nl + 1]
            return output
        return None
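
The trim hack removes the echoed command (first line) and the trailing prompt (last line); a standalone sketch of that slice, assuming CRLFs were already normalized to a single newline:

def _trim_sketch(output, crlf="\n"):
    # drop the first line (command echo) and the last line (prompt)
    first_nl = output.find(crlf)
    last_nl = output.rfind(crlf)
    return output[first_nl + 1:last_nl + 1]

# _trim_sketch("ls\nfile1\nfile2\nprompt$ ") -> "file1\nfile2\n"
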
Example #10
    def __init__(self,
                 config_text=None,
                 config_files=None,
                 use_ssl=False,
                 tmpdir=None,
                 keep_temp=True,
                 errors_ignore=None,
                 warnings_ignore=None,
                 aka=None,
                 local_auth=True):

        # Force all assertions, when running like this, to fail the TC
        tcfl.tc.tc_c.exception_to_result[AssertionError] = tcfl.tc.failed_e

        # If no aka is defined, we make one out of the place when this
        # object is being created, so it is always the same *and* thus
        # the report hashes are always identical with each run
        if aka == None:
            self.aka = "ttbd-" + commonl.mkid(commonl.origin_get(2), 4)
        else:
            self.aka = aka
        if config_files == None:
            config_files = []
        self.keep_temp = keep_temp
        self.port = commonl.tcp_port_assigner()
        self.use_ssl = use_ssl
        if use_ssl == True:
            self.url = "https://localhost:%d" % self.port
            ssl_context = ""
        else:
            self.url = "http://localhost:%d" % self.port
            ssl_context = "--no-ssl"
        self.url_spec = "fullid:'^%s'" % self.aka
        if tmpdir:
            self.tmpdir = tmpdir
        else:
            # default to place the server's dir in the tempdir for
            # testcases
            self.tmpdir = os.path.join(tcfl.tc.tc_c.tmpdir, "server", self.aka)
        shutil.rmtree(self.tmpdir, ignore_errors=True)
        commonl.makedirs_p(self.tmpdir)

        self.etc_dir = os.path.join(self.tmpdir, "etc")
        self.files_dir = os.path.join(self.tmpdir, "files")
        self.lib_dir = os.path.join(self.tmpdir, "lib")
        self.state_dir = os.path.join(self.tmpdir, "state")
        os.mkdir(self.etc_dir)
        os.mkdir(self.files_dir)
        os.mkdir(self.lib_dir)
        os.mkdir(self.state_dir)
        self.stdout = self.tmpdir + "/stdout"
        self.stderr = self.tmpdir + "/stderr"

        for fn in config_files:
            shutil.copy(fn, self.etc_dir)

        with open(os.path.join(self.etc_dir, "conf_00_base.py"), "w") as cfgf:
            cfgf.write(r"""
import ttbl.config
ttbl.config.processes = 2
host = '127.0.0.1'
""")
            # We don't define here the port, so we see it in the
            # command line
            if config_text:
                cfgf.write(config_text)

        self.srcdir = os.path.realpath(
            os.path.join(os.path.dirname(__file__), ".."))
        self.cmdline = [
            "stdbuf",
            "-o0",
            "-e0",
            # This allows us to default to the source location, when
            # running from source, or the installed when running from
            # the system
            os.environ.get("TTBD_PATH", self.srcdir + "/ttbd/ttbd"),
            "--port",
            "%d" % self.port,
            ssl_context,
            "-vvvvv",
            "--files-path",
            self.files_dir,
            "--state-path",
            self.state_dir,
            "--config-path",
            "",  # This empty one is to clear them all
            "--config-path",
            self.etc_dir
        ]
        self.local_auth = local_auth
        if local_auth:
            self.cmdline.append("--local-auth")
        self.p = None
        #: Exclude these regexes / strings from triggering an error
        #: message check
        self.errors_ignore = [] if errors_ignore == None else errors_ignore

        #: Exclude these regexes / strings from triggering a warning
        #: message check
        self.warnings_ignore = [re.compile('daemon lacks CAP_NET_ADMIN')]
        if warnings_ignore:
            self.warnings_ignore += warnings_ignore

        def _preexec_fn():
            stdout_fd = os.open(
                self.stdout,
                # O_CREAT: Always a new file, so
                # we can check for errors and not
                # get confused with previous runs
                os.O_WRONLY | os.O_EXCL | os.O_CREAT,
                0o0644)
            stderr_fd = os.open(
                self.stderr,
                # O_CREAT: Always a new file, so
                # we can check for errors and not
                # get confused with previous runs
                os.O_WRONLY | os.O_EXCL | os.O_CREAT,
                0o0644)
            os.dup2(stdout_fd, 1)
            os.dup2(stderr_fd, 2)

        logging.info("Launching: %s", " ".join(self.cmdline))
        self.p = subprocess.Popen(self.cmdline,
                                  shell=False,
                                  cwd=self.tmpdir,
                                  close_fds=True,
                                  preexec_fn=_preexec_fn,
                                  bufsize=0)
        try:
            self._check_if_alive()
        finally:
            self.check_log_for_issues()
        # if we call self.terminate() from __del__, the garbage
        # collector has started to wipe things, so we can't use, e.g.,
        # open() to check the log file
        atexit.register(self.terminate)
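
A minimal usage sketch for this fixture, assuming the enclosing class is called ttbd (the class name is not visible in the snippet, so treat it as a placeholder):

# hypothetical class name; constructor arguments match the signature above
server = ttbd(config_text="# extra ttbd configuration goes here\n",
              keep_temp=False)
print(server.url)       # e.g. http://localhost:<assigned port>
print(server.url_spec)  # selector that matches only this server's targets
server.terminate()      # also registered with atexit by __init__
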
Example #11
File: testing.py Project: intel/tcf
    def __init__(self, config_text = None, config_files = None,
                 use_ssl = False, tmpdir = None, keep_temp = True,
                 errors_ignore = None, warnings_ignore = None,
                 aka = None):

        # Force all assertions, when running like this, to fail the TC
        tcfl.tc.tc_c.exception_to_result[AssertionError] = tcfl.tc.failed_e

        # If no aka is defined, we make one out of the place when this
        # object is being created, so it is always the same *and* thus
        # the report hashes are always identical with each run
        if aka == None:
            self.aka = "ttbd-" + commonl.mkid(commonl.origin_get(2), 4)
        else:
            self.aka = aka
        if config_files == None:
            config_files = []
        self.keep_temp = keep_temp
        self.port = commonl.tcp_port_assigner()
        self.use_ssl = use_ssl
        if use_ssl == True:
            self.url = "https://localhost:%d" % self.port
            ssl_context = ""
        else:
            self.url = "http://localhost:%d" % self.port
            ssl_context = "--no-ssl"
        self.url_spec = "url:'^%s'" % self.url
        if tmpdir:
            self.tmpdir = tmpdir
        else:
            # Don't use a colon in the name, or it'll think it is a path
            self.tmpdir = tempfile.mkdtemp(prefix = "test-ttbd-%d."
                                           % self.port)

        self.etc_dir = os.path.join(self.tmpdir, "etc")
        self.files_dir = os.path.join(self.tmpdir, "files")
        self.lib_dir = os.path.join(self.tmpdir, "lib")
        self.state_dir = os.path.join(self.tmpdir, "state")
        os.mkdir(self.etc_dir)
        os.mkdir(self.files_dir)
        os.mkdir(self.lib_dir)
        os.mkdir(self.state_dir)
        self.stdout = self.tmpdir + "/stdout"
        self.stderr = self.tmpdir + "/stderr"

        for fn in config_files:
            shutil.copy(fn, self.etc_dir)

        with open(os.path.join(self.etc_dir,
                               "conf_00_base.py"), "w") as cfgf:
            cfgf.write(r"""
import ttbl.config
ttbl.config.processes = 2
host = '127.0.0.1'
""")
            # We don't define here the port, so we see it in the
            # command line
            if config_text:
                cfgf.write(config_text)

        srcdir = os.path.realpath(
            os.path.join(os.path.dirname(__file__), ".."))
        self.cmdline = [
            # This allows us to default to the source location, when
            # running from source, or the installed when running from
            # the system
            os.environ.get("TTBD_PATH", srcdir + "/ttbd/ttbd"),
            "--port", "%d" % self.port,
            ssl_context,
            "--local-auth", "-vvvvv",
            "--files-path", self.files_dir,
            "--state-path", self.state_dir,
            "--var-lib-path", self.lib_dir,
            "--config-path", "", # This empty one is to clear them all
            "--config-path", self.etc_dir
        ]
        self.p = None
        #: Exclude these regexes / strings from triggering an error
        #: message check
        self.errors_ignore = [] if errors_ignore == None else errors_ignore

        #: Exclude these regexes / strings from triggering a warning
        #: message check
        self.warnings_ignore = [ re.compile('daemon lacks CAP_NET_ADMIN') ]
        if warnings_ignore:
            self.warnings_ignore += warnings_ignore

        def _preexec_fn():
            stdout_fd = os.open(self.stdout,
                                # O_CREAT: Always a new file, so
                                # we can check for errors and not
                                # get confused with previous runs
                                os.O_WRONLY | os.O_EXCL |os.O_CREAT, 0o0644)
            stderr_fd = os.open(self.stderr,
                                # O_CREAT: Always a new file, so
                                # we can check for errors and not
                                # get confused with previous runs
                                os.O_WRONLY | os.O_EXCL |os.O_CREAT, 0o0644)
            os.dup2(stdout_fd, 1)
            os.dup2(stderr_fd, 2)

        logging.info("Launching: %s", " ".join(self.cmdline))
        self.p = subprocess.Popen(
            self.cmdline, shell = False, cwd = self.tmpdir,
            close_fds = True, preexec_fn = _preexec_fn)
        self._check_if_alive()
        self.check_log_for_issues()
Example #12
    def config_file_write(self, name, data, bsp = None):
        """\
        Write an extra config file called *NAME*.conf in the Zephyr's
        App build directory.

        Note this takes care to only write it if the data is new or
        the file does not exist, to avoid unnecessary rebuilds.

        :param str name: Name for the configuration file; this has to
          be a valid filename; *.conf* will be added by the function.

        :param str data: Data to include in the configuration file;
          this is (currently) valid kconfig data, which are lines of
          text with # acting as comment character; for example::

            CONFIG_UART_CONSOLE_ON_DEV_NAME="UART_1"

        :param str bsp: (optional) BSP on which to operate; when the
          target is configured for a :term:`BSP model` which contains
          multiple Zephyr BSPs, you will need to specify which one to
          modify.

          This parameter can be omitted if only one BSP is available
          in the current BSP Model.

        *Example*

        >>> if something:
        >>>     target.zephyr.config_file_write("mytweaks",
        >>>                                     'CONFIG_SOMEVAR=1\\n'
        >>>                                     'CONFIG_ANOTHER="VALUE"\\n')
        """
        target = self.target
        bsp = self._bsp_select(target, bsp)

        # Ensure the config directory is there
        outdir = target._kws_bsp[bsp]['zephyr_objdir']
        try:
            os.makedirs(outdir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise RuntimeError("%s: Cannot create outdir directory: %s"
                                   % (outdir, str(e)))

        # Now create a .new file
        if not name.endswith(".conf"):
            name += ".conf"
        existing_filename = os.path.join(outdir, name)
        new_filename = existing_filename + ".new"
        with codecs.open(new_filename, "w+", encoding = 'utf-8',
                         errors = 'ignore') as f:
            f.write("""\
# Config file automatically generated by TCF's:
#
#   %s
#
# because of instructions from
#
#   %s
#
# Do not edit by hand

""" % (commonl.origin_get(), commonl.origin_get(2)))
            f.write(data)
            # report the config file we wrote, so reproduction
            # instructions will carry it in the report file; we report
            # the data without the header to make it easier -- note we
            # use report_pass() [vs report_info()] as _info might get
            # filtered as too verbose info, whereas pass is
            # information important for passing.
            target.report_pass("enabled Zephyr config file %s at %s"
                               % (name, outdir),
                               { 'config file': data })
        if not os.path.exists(existing_filename):
            shutil.move(new_filename, existing_filename)
        else:
            # Check if there are changes before updating it, to avoid
            # unnecessary rebuilds
            _new_hash = hashlib.sha256()
            new_hash = commonl.hash_file(_new_hash, new_filename)
            _old_hash = hashlib.sha256()
            old_hash = commonl.hash_file(_old_hash, existing_filename)
            if new_hash.digest() != old_hash.digest():
                shutil.move(new_filename, existing_filename)
            else:
                os.unlink(new_filename)
Example #13
def setup(*args,
          report_drivers=None,
          verbosity=2,
          logfile_name="run.log",
          name="toplevel",
          runid=None,
          hashid="standalone",
          skip_reports=False,
          **kwargs):
    """
    Set up and load the TCF library configuration for standalone execution

    This is needed before you can access from your client program any
    other module.

    :param int verbosity: (optional, default 2) verbosity of output to
      the console

    :param str logfile_name: (optional, default *run.log*) where to
      log the detailed output to.

    :param list(tcfl.tc.report_driver_c) report_drivers: (optional)
      list of drivers for reporting execution data.

      By default, drivers that log to a logfile, to report files and
      to JSON files are loaded for you.

    Other arguments as :func:`load`.

    """
    assert runid == None or isinstance(runid, str)
    assert hashid == None or isinstance(hashid, str)
    assert isinstance(skip_reports, bool)

    tcfl.tc.tc_c.runid = runid
    if runid == None:
        tcfl.tc.tc_c.runid_visible = ""
    else:
        tcfl.tc.tc_c.runid_visible = runid
    # Do a partial initialization of the testcase management system so
    # the only testcase object declared, tcfl.tc.tc_global reflects
    # all the info
    tcfl.tc.tc_c.tmpdir = "tmp"
    # reinitialize this one, so that we have minimal hash printing
    tcfl.tc.tc_global = tcfl.tc.tc_c(name,
                                     "",
                                     commonl.origin_get(1),
                                     hashid=hashid)
    tcfl.tc.tc_global.skip_reports = skip_reports

    if not report_drivers:
        tcfl.tc.report_driver_c.add(tcfl.tc.report_jinja2.driver("."),
                                    name="jinja2")
        tcfl.tc.report_driver_c.add(tcfl.tc.report_console.driver(
            verbosity, logfile_name, verbosity_logf=100),
                                    name="console")
        tcfl.tc.report_driver_c.add(tcfl.tc.report_data_json.driver(),
                                    name="json")
    else:
        for report_driver in report_drivers:
            tcfl.tc.report_driver_c.add(report_driver)
    load(*args, **kwargs)
    tcfl.msgid_c.tls.msgid_lifo.append(tcfl.msgid_c(""))
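
A minimal standalone-client sketch using this entry point; the import path is a placeholder, use whatever module actually exports setup():

# from some_tcf_client_module import setup   # placeholder import
setup(verbosity=1, logfile_name="client-run.log",
      runid=None, hashid="standalone")
# after this, tcfl.tc.tc_global and the default report drivers (console,
# jinja2, json) are initialized and other tcfl APIs can be used from a
# plain client program
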
Example #14
def mount_fs(target, image, boot_dev):
    """
    Mounts a root filesystem on /mnt

    The partition used as a root filesystem is picked up based on the
    image that is going to be installed; we look for one that has the
    most similar image already installed and pick that.

    :returns: name of the root partition device
    """
    pos_reinitialize = target.property_get("pos_reinitialize", False)
    if pos_reinitialize:
        # Need to reinit the partition table (we were told to by
        # setting pos_reinitialize to anything)
        target.report_info("POS: repartitioning per pos_reinitialize "
                           "property")
        for tag in target.rt.keys():
            # remove pos_root_*, as they don't apply anymore
            if tag.startswith("pos_root_"):
                target.property_set(tag, None)
        _disk_partition(target)
        target.property_set('pos_reinitialize', None)

    root_part_dev = _rootfs_guess(target, image, boot_dev)
    # save for other functions called later
    target.root_part_dev = root_part_dev
    root_part_dev_base = os.path.basename(root_part_dev)
    image_prev = target.property_get("pos_root_" + root_part_dev_base,
                                     "nothing")
    target.report_info("POS: will use %s for root partition (had %s before)"
                       % (root_part_dev, image_prev))

    # fsinfo looks like described in target.pos._fsinfo_load()
    dev_info = None
    for blockdevice in target.pos.fsinfo.get('blockdevices', []):
        for child in blockdevice.get('children', []):
            if child['name'] == root_part_dev_base:
                dev_info = child
    if dev_info == None:
        # it can't be that we need to repartition, because at this
        # point *we* have already partitioned.
        raise tc.error_e(
            "Can't find information for root device %s in FSinfo array"
            % root_part_dev_base,
            dict(fsinfo = target.pos.fsinfo))

    # what format does it currently have?
    current_fstype = dev_info.get('fstype', 'ext4')

    # What format does it have to have?
    #
    # Ok, here we need to note that we can't have multiple root
    # filesystems with the same UUID or LABEL, so the image can't rely
    # on UUIDs
    #
    img_fss = target.pos.metadata.get('filesystems', {})
    if '/' in img_fss:
        # a common origin is ok because the YAML schema forces both
        # fstype and mkfs_opts to be specified
        origin = "image's /.tcf.metadata.yaml"
        fsdata = img_fss.get('/', {})
    else:
        origin = "defaults @" + commonl.origin_get(0)
        fsdata = {}
    fstype = fsdata.get('fstype', 'ext4')
    mkfs_opts = fsdata.get('mkfs_opts', '-Fj')

    # do they match?
    if fstype != current_fstype:
        target.report_info(
            "POS: reformatting %s because current format is '%s' and "
            "'%s' is needed (per %s)"
            % (root_part_dev, current_fstype, fstype, origin))
        _mkfs(target, root_part_dev, fstype, mkfs_opts)
    else:
        target.report_info(
            "POS: no need to reformat %s because current format is '%s' and "
            "'%s' is needed (per %s)"
            % (root_part_dev, current_fstype, fstype, origin), dlevel = 1)

    for try_count in range(3):
        target.report_info("POS: mounting root partition %s onto /mnt "
                           "to image [%d/3]" % (root_part_dev, try_count))

        # don't let it fail or it will raise an exception, so we
        # print FAILED in that case to look for stuff; note the
        # double apostrophe trick so the regex finder doesn't trip
        # on the command
        output = target.shell.run(
            "mount %s /mnt || echo FAI''LED" % root_part_dev,
            output = True)
        # What did we get?
        if 'FAILED' in output:
            if 'special device ' + root_part_dev \
               + ' does not exist.' in output:
                _disk_partition(target)
            else:
                # ok, this probably means the partitions are not
                # formatted; so let's just reformat and retry
                _mkfs(target, root_part_dev, fstype, mkfs_opts)
        else:
            target.report_info("POS: mounted %s onto /mnt to image"
                               % root_part_dev)
            return root_part_dev	# it worked, we are done
        # fall through, retry
    else:
        raise tc.blocked_e(
            "POS: Tried to mount too many times and failed",
            dict(target = target))
Example #15
def config_watch_add(bus_name, driver_name, device_name, actions):
    r"""

    :param str bus_name: name of bus in */sys/bus* to watch
    :param str driver_name: name of driver in
      */sys/bus/BUS_NAME/drivers* to watch
    :param str device_name: device under
      /sys/bus/BUS_NAME/drivers/DRIVER_NAME to watch; if *None*, watch
      all of them
    :param dict actions: dictionary describing actions to do; key is a
      substring of a message, value is a function to call or a tuple
      that starts with a function to call and the rest are arguments
      to add

      The action function has to follow this prototype:

      >>> def action_function(bus_name, driver_name, device_name,
                              condition, entry, *args, **kwargs):

      thus, when called, bus_name, driver_name and device_name are all
      the names of the entity that is causing it; condition is the
      condition string that was matched (the key) and *entry* is the
      journal entry which matched. *\*args* and *\*\*kwargs* are the
      extra arguments given in the *actions* value tuple.

    """

    assert isinstance(bus_name, basestring)
    assert isinstance(driver_name, basestring)
    if device_name:
        if isinstance(device_name, basestring):
            _device_name = "/" + device_name
        elif isinstance(device_name, re._pattern_type):
            _device_name = "/" + device_name.pattern
        else:
            raise AssertionError(
                "'device_name' must be string or regex, found %s"
                % type(device_name).__name__)
    else:
        _device_name = ""
    assert isinstance(actions, dict)
    global _watch_rules

    _actions = {}
    origin = commonl.origin_get(2)
    # verify arguments and transform all the actions into a canonical
    # form (all have to be a list)
    for condition, action in actions.iteritems():
        assert isinstance(condition, basestring), \
            "Key passed as condition is not a string"
        try:
            action_fn = action[0]
            _actions[condition] = action
        except TypeError:
            action_fn = action
            _actions[condition] = [action_fn]
        assert callable(action_fn), \
            "Argument passed as action function to condition '%s' " \
            "is not callable" % condition

    driver_path = os.path.join("/sys/bus", bus_name, "drivers", driver_name)
    if not os.path.isdir(driver_path):
        logging.warning(
            "%s/%s%s @%s: driver path does not exist, will not monitor",
            bus_name, driver_name, _device_name, origin)
        return
    _watch_rules.append((bus_name, driver_name, device_name, _actions, origin))
    logging.info("%s/%s%s @%s: will monitor", bus_name, driver_name,
                 _device_name, origin)
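
A minimal usage sketch based on the prototype described in the docstring; the bus/driver names, the journal message substrings and the action body are illustrative:

def _log_match(bus_name, driver_name, device_name,
               condition, entry, *args, **kwargs):
    # illustrative action: just log the journal entry that matched
    logging.error("%s/%s/%s matched '%s': %s",
                  bus_name, driver_name, device_name, condition, entry)

config_watch_add("usb", "usb", None, {
    # plain function value
    "device descriptor read/64, error": _log_match,
    # tuple value: function first, extra positional arguments after
    "reset full-speed USB device": (_log_match, "extra-arg"),
})
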
Example #16
def mount_fs(target, image, boot_dev):
    """
    Mounts a root filesystem on /mnt

    The partition used as a root filesystem is picked up based on the
    image that is going to be installed; we look for one that has the
    most similar image already installed and pick that.

    :returns: name of the root partition device
    """
    # does the disk have a partition scheme we recognize?
    pos_partsizes = target.rt['pos_partsizes']
    # the name we'll give to the boot partition; see
    # _disk_partition(); if we can't find it, we assume the disk
    # needs to be repartitioned. Note it includes the sizes, so if we
    # change the sizes in the config it reformats automatically, e.g.:
    # TCF-multiroot-NN:NN:NN:NN
    target._boot_label_name = "TCF-multiroot-" + pos_partsizes
    pos_reinitialize_force = True
    boot_dev_base = os.path.basename(boot_dev)
    child = target.pos.fsinfo_get_child_by_partlabel(boot_dev_base,
                                                     target._boot_label_name)
    if child:
        pos_reinitialize_force = False
    else:
        target.report_info("POS: repartitioning due to different"
                           " partition schema")

    pos_reinitialize = target.property_get("pos_reinitialize", False)
    if pos_reinitialize:
        target.report_info("POS: repartitioning per pos_reinitialize "
                           "property")
    if pos_reinitialize or pos_reinitialize_force:
        # Need to reinit the partition table (we were told to by
        # setting pos_repartition to anything or we didn't recognize
        # the existing partitioning scheme)
        for tag in target.rt.keys():
            # remove pos_root_*, as they don't apply anymore
            if tag.startswith("pos_root_"):
                target.property_set(tag, None)
        _disk_partition(target)
        target.pos.fsinfo_read(target._boot_label_name)
        target.property_set('pos_reinitialize', None)

    root_part_dev = _rootfs_guess(target, image, boot_dev)
    # save for other functions called later
    target.root_part_dev = root_part_dev
    root_part_dev_base = os.path.basename(root_part_dev)
    image_prev = target.property_get("pos_root_" + root_part_dev_base,
                                     "nothing")
    target.report_info("POS: will use %s for root partition (had %s before)" %
                       (root_part_dev, image_prev))

    # fsinfo looks like described in target.pos.fsinfo_read()
    dev_info = None
    for blockdevice in target.pos.fsinfo.get('blockdevices', []):
        for child in blockdevice.get('children', []):
            if child['name'] == root_part_dev_base:
                dev_info = child
    if dev_info == None:
        # it can't be that we need to repartition, because at this
        # point *we* have already partitioned.
        raise tc.error_e(
            "Can't find information for root device %s in FSinfo array" %
            root_part_dev_base, dict(fsinfo=target.pos.fsinfo))

    # what format does it currently have?
    current_fstype = dev_info.get('fstype', 'ext4')

    # What format does it have to have?
    #
    # Ok, here we need to note that we can't have multiple root
    # filesystems with the same UUID or LABEL, so the image can't rely
    # on UUIDs
    #
    img_fss = target.pos.metadata.get('filesystems', {})
    if '/' in img_fss:
        # a common origin is ok because the YAML schema forces both
        # fstype and mkfs_opts to be specified
        origin = "image's /.tcf.metadata.yaml"
        fsdata = img_fss.get('/', {})
    else:
        origin = "defaults @" + commonl.origin_get(0)
        fsdata = {}
    fstype = fsdata.get('fstype', 'ext4')
    mkfs_opts = fsdata.get('mkfs_opts', '-Fj')

    # do they match?
    if fstype != current_fstype:
        target.report_info(
            "POS: reformatting %s because current format is '%s' and "
            "'%s' is needed (per %s)" %
            (root_part_dev, current_fstype, fstype, origin))
        _mkfs(target, root_part_dev, fstype, mkfs_opts)
    else:
        target.report_info(
            "POS: no need to reformat %s because current format is '%s' and "
            "'%s' is needed (per %s)" %
            (root_part_dev, current_fstype, fstype, origin),
            dlevel=1)

    for try_count in range(3):
        target.report_info("POS: mounting root partition %s onto /mnt "
                           "to image [%d/3]" % (root_part_dev, try_count))

        # don't let it fail or it will raise an exception, so we
        # print FAILED in that case to look for stuff; note the
        # double apostrophe trick so the regex finder doesn't trip
        # on the command
        output = target.shell.run("mount %s /mnt || echo FAI''LED" %
                                  root_part_dev,
                                  output=True)
        # What did we get?
        if 'FAILED' in output:
            if 'special device ' + root_part_dev \
               + ' does not exist.' in output:
                _disk_partition(target)
                target.pos.fsinfo_read(target._boot_label_name)
            else:
                # ok, this probably means the partitions are not
                # formatted; so let's just reformat and retry
                _mkfs(target, root_part_dev, fstype, mkfs_opts)
        else:
            target.report_info("POS: mounted %s onto /mnt to image" %
                               root_part_dev)
            return root_part_dev  # it worked, we are done
        # fall through, retry
    else:
        raise tc.blocked_e("POS: Tried to mount too many times and failed",
                           dict(target=target))
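
The fstype and mkfs options come from the image metadata when present; below is a sketch of the dictionary shape the code above expects from target.pos.metadata, with the keys inferred from the .get() calls and illustrative values:

metadata = {
    "filesystems": {
        "/": {
            "fstype": "ext4",     # filesystem type for the root partition
            "mkfs_opts": "-Fj",   # options passed to mkfs when reformatting
        },
    },
}
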
Example #17
    def _run(self,
             cmd=None,
             expect=None,
             prompt_regex=None,
             output=False,
             output_filter_crlf=True,
             trim=False,
             console=None,
             origin=None):
        if cmd:
            assert isinstance(cmd, str)
        assert expect == None \
            or isinstance(expect, str) \
            or isinstance(expect, typing.Pattern) \
            or isinstance(expect, list)
        assert prompt_regex == None \
            or isinstance(prompt_regex, str) \
            or isinstance(prompt_regex, typing.Pattern)

        if origin == None:
            origin = commonl.origin_get(3)
        else:
            assert isinstance(origin, str)

        target = self.target
        testcase = target.testcase

        if output:
            offset = self.target.console.size(console=console)

        # the protocol needs UTF-8 anyway
        cmd = commonl.str_cast_maybe(cmd)
        if cmd and not self._fixups:
            self.target.send(cmd, console=console)
        if cmd and self._fixups:
            # we have to handle CRLF at the end ourselves here, we
            # can't defer to target.send() -- see that for how we get
            # crlf.
            if console == None:
                console = target.console.default
            crlf = target.console.crlf.get(console, None)
            cmd += crlf

            try:
                # send the command, doing echo verification if needed
                if origin == None:
                    origin = commonl.origin_get(2)
                testcase.tls.echo_cmd = cmd
                testcase.tls.echo_waiter = self.target.console.text(
                    cmd,
                    name="shell echo",
                    console=console,
                    timeout=10,
                )
                while True:
                    testcase.tls.echo_cmd_leftover = None
                    self.target.send(testcase.tls.echo_cmd,
                                     crlf=None,
                                     console=console)
                    testcase.tls.echo_waiter.regex_set(testcase.tls.echo_cmd)
                    testcase.expect(
                        testcase.tls.echo_waiter,
                        **self._fixups,
                    )
                    if testcase.tls.echo_cmd_leftover == None:
                        break
                    self.target.report_info(
                        "shell/fixup: resuming partially interrupted command"
                        " by message printed in console; sending: " +
                        commonl.str_bytes_cast(testcase.tls.echo_cmd, str))
                    testcase.tls.echo_cmd = testcase.tls.echo_cmd_leftover
                    continue
            finally:
                testcase.tls.echo_cmd = None

        console_name = console if console else "default"
        self.target.report_info(f"shell/{console_name}: sent command: {cmd}")
        if expect:
            if isinstance(expect, list):
                for expectation in expect:
                    assert isinstance(expectation, str) \
                        or isinstance(expectation, typing.Pattern)
                    r = target.expect(expectation,
                                      name="command output",
                                      console=console,
                                      origin=origin)
            else:
                r = target.expect(expect,
                                  name="command output",
                                  console=console,
                                  origin=origin)
        else:
            r = None

        if prompt_regex == None:
            self.target.expect(self.prompt_regex,
                               name="shell prompt",
                               console=console,
                               origin=origin)
        else:
            self.target.expect(prompt_regex,
                               name="shell prompt",
                               console=console,
                               origin=origin)
        if not output:
            return r

        # we need to return output, postprocess it
        if console == None:
            console = target.console.default
        if output_filter_crlf:
            newline = None
        else:
            newline = ''
        output = self.target.console.read(offset=offset,
                                          console=console,
                                          newline=newline)
        if trim:
            # When we call run(), it usually prints in the console:
            ## <command-echo from our typing>
            ## <command output>
            ## <prompt>
            #
            # So to trim we just remove the first and last
            # lines--won't work well without output_filter_crlf
            # and it is quite a hack.
            first_nl = output.find("\n")
            last_nl = output.rfind("\n")
            output = output[first_nl + 1:last_nl + 1]
        return output