Exemple #1
0
def check_glibc_ver(ver):
    """
    Verify the system glibc is at least version *ver*.

    :param ver: Minimum required glibc version string (e.g. '2.12').
    :raises error.TestError: If the glibc version cannot be determined
                             or is older than *ver*.
    """
    # First line of `ldd --version` carries the glibc version string.
    glibc_ver = commands.getoutput('ldd --version').splitlines()[0]
    match = re.search(r'(\d+\.\d+(\.\d+)?)', glibc_ver)
    if match is None:
        # Guard against unexpected `ldd` output; the original code would
        # crash with AttributeError calling .group() on None.
        raise error.TestError("Could not determine glibc version from: %s" %
                              glibc_ver)
    glibc_ver = match.group()
    if utils.compare_versions(glibc_ver, ver) == -1:
        raise error.TestError("Glibc too old (%s). Glibc >= %s is needed." %
                              (glibc_ver, ver))
Exemple #2
0
 def assert_(self, expr, msg='Assertion failed.'):
     """Raise error.TestError carrying *msg* unless *expr* is truthy."""
     if expr:
         return
     raise error.TestError(msg)
Exemple #3
0
    def run_once(self, params):
        """
        Execute one virt subtest described by *params*.

        Locates the subtest module named by params['type'] under either the
        test-local or the common "tests" directory, runs its run_<type>()
        entry point, and drives preprocessing/postprocessing around it,
        saving the environment file after every stage.

        :param params: Dict of test parameters (wrapped in virt_utils.Params).
        :raises error.TestNAError: If a dependency test already failed.
        :raises error.TestError: If the subtest source file cannot be found.
        :raises error.JobError: On any failure when abort_on_error == 'yes'.
        """
        # Convert params to a Params object
        params = virt_utils.Params(params)

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])
            self.write_test_keyval({key: params[key]})

        # Set the log file dir for the logging mechanism used by kvm_subprocess
        # (this must be done before unpickling env)
        virt_utils.set_log_file_dir(self.debugdir)

        # Open the environment file
        env_filename = os.path.join(self.bindir, params.get("env", "env"))
        env = virt_utils.Env(env_filename, self.env_version)

        # Whether run_func completed; decides below if a postprocessing
        # exception is fatal or merely logged.
        test_passed = False

        try:
            try:
                try:
                    # Get the test routine corresponding to the specified
                    # test type
                    t_type = params.get("type")
                    # Verify if we have the correspondent source file for it
                    # (test-local dir takes precedence over the common dir)
                    virt_dir = os.path.dirname(virt_utils.__file__)
                    subtest_dir_common = os.path.join(virt_dir, "tests")
                    subtest_dir_test = os.path.join(self.bindir, "tests")
                    subtest_dir = None
                    for d in [subtest_dir_test, subtest_dir_common]:
                        module_path = os.path.join(d, "%s.py" % t_type)
                        if os.path.isfile(module_path):
                            subtest_dir = d
                            break
                    if subtest_dir is None:
                        raise error.TestError(
                            "Could not find test file %s.py "
                            "on either %s or %s directory" %
                            (t_type, subtest_dir_test, subtest_dir_common))
                    # Load the test module
                    f, p, d = imp.find_module(t_type, [subtest_dir])
                    test_module = imp.load_module(t_type, f, p, d)
                    f.close()

                    # Preprocess
                    # (env is saved even if preprocessing blows up, so VM
                    # state created so far is not lost)
                    try:
                        virt_env_process.preprocess(self, params, env)
                    finally:
                        env.save()
                    # Run the test function
                    run_func = getattr(test_module, "run_%s" % t_type)
                    try:
                        run_func(self, params, env)
                    finally:
                        env.save()
                    test_passed = True

                except Exception, e:
                    logging.error("Test failed: %s: %s", e.__class__.__name__,
                                  e)
                    # Error-path postprocessing (e.g. screendumps); original
                    # exception is re-raised afterwards.
                    try:
                        virt_env_process.postprocess_on_error(
                            self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                # A postprocessing failure only fails the job when the test
                # itself passed; otherwise it would mask the real failure.
                try:
                    try:
                        virt_env_process.postprocess(self, params, env)
                    except Exception, e:
                        if test_passed:
                            raise
                        logging.error(
                            "Exception raised during "
                            "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "kvm":
                # Leave live VMs running for debugging and report how to
                # reach them before aborting the whole job.
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info(
                            "'%s' has a %s monitor unix socket at: %s",
                            vm.name, m.protocol, m.filename)
                    logging.info(
                        "The command line used to start '%s' was:\n%s",
                        vm.name, vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
Exemple #4
0
 def is_bondable(self):
     """Interface hook; base implementation always raises error.TestError."""
     raise error.TestError('Undefined')
Exemple #5
0
    def send(self, packet):
        """Transmit one ethernet packet over the previously opened raw socket.

        :raises error.TestError: If the raw socket has not been opened.
        """
        sock = self._socket
        if sock is None:
            raise error.TestError('Raw socket not open')
        sock.send(packet)
Exemple #6
0
def _installer_class(install_mode):
    """Return the installer class registered for *install_mode*.

    :raises error.TestError: If no class is registered for the mode.
    """
    klass = installer_classes.get(install_mode)
    if klass is not None:
        return klass
    raise error.TestError('Invalid or unsupported'
                          ' install mode: %s' % install_mode)
Exemple #7
0
 def wait_for_carrier(self, timeout=60):
     """Poll once per second until the interface reports carrier.

     :param timeout: Maximum number of seconds to wait.
     :raises error.TestError: If carrier is still down after *timeout*
                              seconds.
     """
     while timeout and self.get_carrier() != '1':
         timeout -= 1
         time.sleep(1)
     # The loop can exit with timeout == 0 even though carrier came up
     # during the final sleep; re-check before declaring a timeout so we
     # don't raise a false failure.
     if timeout == 0 and self.get_carrier() != '1':
         raise error.TestError('Timed out waiting for carrier.')
    def _check_one_cortex_a12(self, cpuinfo):
        """
        Check the errata for a Cortex-A12.

        :param cpuinfo: The CPU info for one CPU.  See _parse_cpu_info for
                        the format.

        >>> _testobj._get_regid_to_val = lambda cpu_id: {}
        >>> try:
        ...     _testobj._check_one_cortex_a12({
        ...         "processor": 2,
        ...         "model name": "ARMv7 Processor rev 1 (v7l)",
        ...         "CPU implementer": ord("A"),
        ...         "CPU part": 0xc0d,
        ...         "CPU variant": 0,
        ...         "CPU revision": 1})
        ... except Exception:
        ...     import traceback
        ...     print traceback.format_exc(),
        Traceback (most recent call last):
        ...
        TestError: Kernel didn't provide register vals

        >>> _testobj._get_regid_to_val = lambda cpu_id: \
               {"(p15, 0, c15, c0, 1)": 0, "(p15, 0, c15, c0, 2)": 0}
        >>> try:
        ...     _testobj._check_one_cortex_a12({
        ...         "processor": 2,
        ...         "model name": "ARMv7 Processor rev 1 (v7l)",
        ...         "CPU implementer": ord("A"),
        ...         "CPU part": 0xc0d,
        ...         "CPU variant": 0,
        ...         "CPU revision": 1})
        ... except Exception:
        ...     import traceback
        ...     print traceback.format_exc(),
        Traceback (most recent call last):
        ...
        TestError: Missing bit 12 for erratum 818325 / 852422: 0x00000000

        >>> _testobj._get_regid_to_val = lambda cpu_id: \
               { "(p15, 0, c15, c0, 1)": (1 << 12) | (1 << 24), \
                 "(p15, 0, c15, c0, 2)": (1 << 1)}
        >>> _info_io.seek(0); _info_io.truncate()
        >>> _testobj._check_one_cortex_a12({
        ...    "processor": 2,
        ...     "model name": "ARMv7 Processor rev 1 (v7l)",
        ...     "CPU implementer": ord("A"),
        ...     "CPU part": 0xc0d,
        ...     "CPU variant": 0,
        ...     "CPU revision": 1})
        >>> "good" in _info_io.getvalue()
        True

        >>> _testobj._check_one_cortex_a12({
        ...    "processor": 2,
        ...     "model name": "ARMv7 Processor rev 1 (v7l)",
        ...     "CPU implementer": ord("A"),
        ...     "CPU part": 0xc0d,
        ...     "CPU variant": 0,
        ...     "CPU revision": 2})
        Traceback (most recent call last):
        ...
        TestError: Unexpected A12 revision: r0p2
        """
        # Logical CPU index from the parsed cpuinfo (see _parse_cpu_info).
        cpu_id = cpuinfo["processor"]
        # Default to -1 so a missing field yields an obviously-bogus rev_str
        # rather than a KeyError.
        variant = cpuinfo.get("CPU variant", -1)
        revision = cpuinfo.get("CPU revision", -1)

        # Handy to express this the way ARM does
        rev_str = "r%dp%d" % (variant, revision)

        # We don't ever expect an A12 newer than r0p1.  If we ever see one
        # then maybe the errata was fixed.
        if rev_str not in ("r0p0", "r0p1"):
            raise error.TestError("Unexpected A12 revision: %s" % rev_str)

        regid_to_val = self._get_regid_to_val(cpu_id)

        # Getting this means we're missing the change to expose debug
        # registers via arm_coprocessor_debug
        if not regid_to_val:
            raise error.TestError("Kernel didn't provide register vals")

        # Erratum 818325 applies to old A12s and erratum 852422 to newer.
        # Fix is to set bit 12 in diag register.  Confirm that's done.
        diag_reg = regid_to_val.get("(p15, 0, c15, c0, 1)")
        if diag_reg is None:
            raise error.TestError("Kernel didn't provide diag register")
        elif not (diag_reg & (1 << 12)):
            raise error.TestError(
                "Missing bit 12 for erratum 818325 / 852422: %#010x" %
                diag_reg)
        logging.info("CPU %d: erratum 818325 / 852422 good", cpu_id)

        # Erratum 821420 applies to all A12s.  Make sure bit 1 of the
        # internal feature register is set.
        int_feat_reg = regid_to_val.get("(p15, 0, c15, c0, 2)")
        if int_feat_reg is None:
            raise error.TestError("Kernel didn't provide internal feature reg")
        elif not (int_feat_reg & (1 << 1)):
            raise error.TestError(
                "Missing bit 1 for erratum 821420: %#010x" % int_feat_reg)
        logging.info("CPU %d: erratum 821420 good", cpu_id)

        # Erratum 825619 applies to all A12s.  Need bit 24 in diag reg.
        # NOTE: this re-reads the same diag register fetched above; the
        # value cannot have changed, the re-read just keeps each erratum
        # check self-contained.
        diag_reg = regid_to_val.get("(p15, 0, c15, c0, 1)")
        if diag_reg is None:
            raise error.TestError("Kernel didn't provide diag register")
        elif not (diag_reg & (1 << 24)):
            raise error.TestError(
                "Missing bit 24 for erratum 825619: %#010x" % diag_reg)
        logging.info("CPU %d: erratum 825619 good", cpu_id)
    def _check_one_cortex_a17(self, cpuinfo):
        """
        Check the errata for a Cortex-A17.

        :param cpuinfo: The CPU info for one CPU.  See _parse_cpu_info for
                        the format.

        >>> _testobj._get_regid_to_val = lambda cpu_id: {}
        >>> try:
        ...     _testobj._check_one_cortex_a17({
        ...         "processor": 2,
        ...         "model name": "ARMv7 Processor rev 1 (v7l)",
        ...         "CPU implementer": ord("A"),
        ...         "CPU part": 0xc0e,
        ...         "CPU variant": 1,
        ...         "CPU revision": 1})
        ... except Exception:
        ...     import traceback
        ...     print traceback.format_exc(),
        Traceback (most recent call last):
        ...
        TestError: Kernel didn't provide register vals

        >>> _testobj._get_regid_to_val = lambda cpu_id: \
               {"(p15, 0, c15, c0, 1)": 0}
        >>> try:
        ...     _testobj._check_one_cortex_a17({
        ...         "processor": 2,
        ...         "model name": "ARMv7 Processor rev 1 (v7l)",
        ...         "CPU implementer": ord("A"),
        ...         "CPU part": 0xc0e,
        ...         "CPU variant": 1,
        ...         "CPU revision": 1})
        ... except Exception:
        ...     import traceback
        ...     print traceback.format_exc(),
        Traceback (most recent call last):
        ...
        TestError: Missing bit 24 for erratum 852421: 0x00000000

        >>> _testobj._get_regid_to_val = lambda cpu_id: \
               {"(p15, 0, c15, c0, 1)": (1 << 12) | (1 << 24)}
        >>> _info_io.seek(0); _info_io.truncate()
        >>> _testobj._check_one_cortex_a17({
        ...    "processor": 2,
        ...     "model name": "ARMv7 Processor rev 1 (v7l)",
        ...     "CPU implementer": ord("A"),
        ...     "CPU part": 0xc0e,
        ...     "CPU variant": 1,
        ...     "CPU revision": 2})
        >>> "good" in _info_io.getvalue()
        True

        >>> _info_io.seek(0); _info_io.truncate()
        >>> _testobj._check_one_cortex_a17({
        ...    "processor": 2,
        ...     "model name": "ARMv7 Processor rev 1 (v7l)",
        ...     "CPU implementer": ord("A"),
        ...     "CPU part": 0xc0e,
        ...     "CPU variant": 2,
        ...     "CPU revision": 0})
        >>> print _info_io.getvalue()
        CPU 2: new A17 (r2p0) = no errata
        """
        # Logical CPU index from the parsed cpuinfo (see _parse_cpu_info).
        cpu_id = cpuinfo["processor"]
        # Default to -1 so a missing field yields an obviously-bogus rev_str
        # rather than a KeyError.
        variant = cpuinfo.get("CPU variant", -1)
        revision = cpuinfo.get("CPU revision", -1)

        # Handy to express this the way ARM does
        rev_str = "r%dp%d" % (variant, revision)

        # May legitimately be empty for revisions with no errata; only
        # validated inside the affected-revision branch below.
        regid_to_val = self._get_regid_to_val(cpu_id)

        # Erratum 852421 and 852423 apply to "r1p0", "r1p1", "r1p2"
        if rev_str in ("r1p0", "r1p1", "r1p2"):
            # Getting this means we're missing the change to expose debug
            # registers via arm_coprocessor_debug
            if not regid_to_val:
                raise error.TestError("Kernel didn't provide register vals")

            # Erratum 852421: workaround sets bit 24 of the diag register.
            diag_reg = regid_to_val.get("(p15, 0, c15, c0, 1)")
            if diag_reg is None:
                raise error.TestError("Kernel didn't provide diag register")
            elif not (diag_reg & (1 << 24)):
                raise error.TestError(
                    "Missing bit 24 for erratum 852421: %#010x" % diag_reg)
            logging.info("CPU %d: erratum 852421 good",cpu_id)

            # Erratum 852423: workaround sets bit 12 of the same register.
            diag_reg = regid_to_val.get("(p15, 0, c15, c0, 1)")
            if diag_reg is None:
                raise error.TestError("Kernel didn't provide diag register")
            elif not (diag_reg & (1 << 12)):
                raise error.TestError(
                    "Missing bit 12 for erratum 852423: %#010x" % diag_reg)
            logging.info("CPU %d: erratum 852423 good",cpu_id)
        else:
            logging.info("CPU %d: new A17 (%s) = no errata", cpu_id, rev_str)
def run_qmp_basic(test, params, env):
    """
    QMP Specification test-suite: this checks if the *basic* protocol conforms
    to its specification, which is file QMP/qmp-spec.txt in QEMU's source tree.

    IMPORTANT NOTES:

        o Most tests depend heavily on QMP's error information (eg. classes),
          this might have bad implications as the error interface is going to
          change in QMP

        o Command testing is *not* covered in this suite. Each command has its
          own specification and should be tested separately

        o We use the same terminology as used by the QMP specification,
          specially with regard to JSON types (eg. a Python dict is called
          a json-object)

        o This is divided in sub test-suites, please check the bottom of this
          file to check the order in which they are run

    TODO:

        o Finding which test failed is not as easy as it should be

        o Are all those check_*() functions really needed? Wouldn't a
          specialized class (eg. a Response class) do better?

    :param test: Test object.
    :param params: Dict of test parameters; "main_vm" names the VM to use.
    :param env: Test environment object.
    :raises error.TestError: If no QMP monitor is available.
    :raises error.TestFail: On any protocol-conformance violation.
    """
    def fail_no_key(qmp_dict, key):
        # Fail unless qmp_dict is a dict containing key.
        if not isinstance(qmp_dict, dict):
            raise error.TestFail("qmp_dict is not a dict (it's '%s')" %
                                 type(qmp_dict))
        if not key in qmp_dict:
            raise error.TestFail("'%s' key doesn't exist in dict ('%s')" %
                                 (key, str(qmp_dict)))

    def check_dict_key(qmp_dict, key, keytype):
        """
        Performs the following checks on a QMP dict key:

        1. qmp_dict is a dict
        2. key exists in qmp_dict
        3. key is of type keytype

        If any of these checks fails, error.TestFail is raised.
        """
        fail_no_key(qmp_dict, key)
        if not isinstance(qmp_dict[key], keytype):
            raise error.TestFail("'%s' key is not of type '%s', it's '%s'" %
                                 (key, keytype, type(qmp_dict[key])))

    def check_key_is_dict(qmp_dict, key):
        check_dict_key(qmp_dict, key, dict)

    def check_key_is_list(qmp_dict, key):
        check_dict_key(qmp_dict, key, list)

    def check_key_is_str(qmp_dict, key):
        # JSON strings decode to unicode objects on Python 2.
        check_dict_key(qmp_dict, key, unicode)

    def check_str_key(qmp_dict, keyname, value=None):
        check_dict_key(qmp_dict, keyname, unicode)
        if value and value != qmp_dict[keyname]:
            raise error.TestFail("'%s' key value '%s' should be '%s'" %
                                 (keyname, str(qmp_dict[keyname]), str(value)))

    def check_key_is_int(qmp_dict, key):
        fail_no_key(qmp_dict, key)
        try:
            # int() only raises ValueError/TypeError; the previous bare
            # 'except:' also swallowed unrelated errors (even SystemExit).
            int(qmp_dict[key])
        except (ValueError, TypeError):
            raise error.TestFail("'%s' key is not of type int, it's '%s'" %
                                 (key, type(qmp_dict[key])))

    def check_bool_key(qmp_dict, keyname, value=None):
        check_dict_key(qmp_dict, keyname, bool)
        if value and value != qmp_dict[keyname]:
            raise error.TestFail("'%s' key value '%s' should be '%s'" %
                                 (keyname, str(qmp_dict[keyname]), str(value)))

    def check_success_resp(resp, empty=False):
        """
        Check QMP OK response.

        @param resp: QMP response
        @param empty: if True, response should not contain data to return
        """
        check_key_is_dict(resp, "return")
        if empty and len(resp["return"]) > 0:
            raise error.TestFail("success response is not empty ('%s')" %
                                 str(resp))

    def check_error_resp(resp, classname=None, datadict=None):
        """
        Check QMP error response.

        @param resp: QMP response
        @param classname: Expected error class name
        @param datadict: Expected error data dictionary
        """
        check_key_is_dict(resp, "error")
        check_key_is_str(resp["error"], "class")
        if classname and resp["error"]["class"] != classname:
            raise error.TestFail("got error class '%s' expected '%s'" %
                                 (resp["error"]["class"], classname))
        check_key_is_dict(resp["error"], "data")
        if datadict and resp["error"]["data"] != datadict:
            raise error.TestFail("got data dict '%s' expected '%s'" %
                                 (resp["error"]["data"], datadict))

    def test_version(version):
        """
        Check the QMP greeting message version key which, according to QMP's
        documentation, should be:

        { "qemu": { "major": json-int, "minor": json-int, "micro": json-int }
          "package": json-string }
        """
        check_key_is_dict(version, "qemu")
        for key in ["major", "minor", "micro"]:
            check_key_is_int(version["qemu"], key)
        check_key_is_str(version, "package")

    def test_greeting(greeting):
        check_key_is_dict(greeting, "QMP")
        check_key_is_dict(greeting["QMP"], "version")
        check_key_is_list(greeting["QMP"], "capabilities")

    def greeting_suite(monitor):
        """
        Check the greeting message format, as described in the QMP
        specfication section '2.2 Server Greeting'.

        { "QMP": { "version": json-object, "capabilities": json-array } }
        """
        greeting = monitor.get_greeting()
        test_greeting(greeting)
        test_version(greeting["QMP"]["version"])

    def json_parsing_errors_suite(monitor):
        """
        Check that QMP's parser is able to recover from parsing errors, please
        check the JSON spec for more info on the JSON syntax (RFC 4627).
        """
        # We're quite simple right now and the focus is on parsing errors that
        # have already biten us in the past.
        #
        # TODO: The following test-cases are missing:
        #
        #   - JSON numbers, strings and arrays
        #   - More invalid characters or malformed structures
        #   - Valid, but not obvious syntax, like zillion of spaces or
        #     strings with unicode chars (different suite maybe?)
        bad_json = []

        # A JSON value MUST be an object, array, number, string, true, false,
        # or null
        #
        # NOTE: QMP seems to ignore a number of chars, like: | and ?
        bad_json.append(":")
        bad_json.append(",")

        # Malformed json-objects
        #
        # NOTE: sending only "}" seems to break QMP
        # NOTE: Duplicate keys are accepted (should it?)
        bad_json.append("{ \"execute\" }")
        bad_json.append("{ \"execute\": \"query-version\", }")
        bad_json.append("{ 1: \"query-version\" }")
        bad_json.append("{ true: \"query-version\" }")
        bad_json.append("{ []: \"query-version\" }")
        bad_json.append("{ {}: \"query-version\" }")

        for cmd in bad_json:
            resp = monitor.cmd_raw(cmd)
            check_error_resp(resp, "JSONParsing")

    def test_id_key(monitor):
        """
        Check that QMP's "id" key is correctly handled.
        """
        # The "id" key must be echoed back in error responses
        id_key = "kvm-autotest"
        resp = monitor.cmd_qmp("eject", {"foobar": True}, id=id_key)
        check_error_resp(resp)
        check_str_key(resp, "id", id_key)

        # The "id" key must be echoed back in success responses
        resp = monitor.cmd_qmp("query-status", id=id_key)
        check_success_resp(resp)
        check_str_key(resp, "id", id_key)

        # The "id" key can be any json-object
        for id_key in [
                True, 1234, "string again!", [1, [], {}, True, "foo"], {
                    "key": {}
                }
        ]:
            resp = monitor.cmd_qmp("query-status", id=id_key)
            check_success_resp(resp)
            if resp["id"] != id_key:
                raise error.TestFail("expected id '%s' but got '%s'" %
                                     (str(id_key), str(resp["id"])))

    def test_invalid_arg_key(monitor):
        """
        Currently, the only supported keys in the input object are: "execute",
        "arguments" and "id". Although expansion is supported, invalid key
        names must be detected.
        """
        resp = monitor.cmd_obj({"execute": "eject", "foobar": True})
        check_error_resp(resp, "QMPExtraInputObjectMember",
                         {"member": "foobar"})

    def test_bad_arguments_key_type(monitor):
        """
        The "arguments" key must be an json-object.

        We use the eject command to perform the tests, but that's a random
        choice, any command that accepts arguments will do, as the command
        doesn't get called.
        """
        for item in [True, [], 1, "foo"]:
            resp = monitor.cmd_obj({"execute": "eject", "arguments": item})
            check_error_resp(resp, "QMPBadInputObjectMember", {
                "member": "arguments",
                "expected": "object"
            })

    def test_bad_execute_key_type(monitor):
        """
        The "execute" key must be a json-string.
        """
        for item in [False, 1, {}, []]:
            resp = monitor.cmd_obj({"execute": item})
            check_error_resp(resp, "QMPBadInputObjectMember", {
                "member": "execute",
                "expected": "string"
            })

    def test_no_execute_key(monitor):
        """
        The "execute" key must exist, we also test for some stupid parsing
        errors.
        """
        for cmd in [{}, {
                "execut": "qmp_capabilities"
        }, {
                "executee": "qmp_capabilities"
        }, {
                "foo": "bar"
        }]:
            resp = monitor.cmd_obj(cmd)
            check_error_resp(resp)  # XXX: check class and data dict?

    def test_bad_input_obj_type(monitor):
        """
        The input object must be... an json-object.
        """
        for cmd in ["foo", [], True, 1]:
            resp = monitor.cmd_obj(cmd)
            check_error_resp(resp, "QMPBadInputObject", {"expected": "object"})

    def test_good_input_obj(monitor):
        """
        Basic success tests for issuing QMP commands.
        """
        # NOTE: We don't use the cmd_qmp() method here because the command
        # object is in a 'random' order
        resp = monitor.cmd_obj({"execute": "query-version"})
        check_success_resp(resp)

        resp = monitor.cmd_obj({"arguments": {}, "execute": "query-version"})
        check_success_resp(resp)

        id = "1234foo"
        resp = monitor.cmd_obj({
            "id": id,
            "execute": "query-version",
            "arguments": {}
        })
        check_success_resp(resp)
        check_str_key(resp, "id", id)

        # TODO: would be good to test simple argument usage, but we don't have
        # a read-only command that accepts arguments.

    def input_object_suite(monitor):
        """
        Check the input object format, as described in the QMP specfication
        section '2.3 Issuing Commands'.

        { "execute": json-string, "arguments": json-object, "id": json-value }
        """
        test_good_input_obj(monitor)
        test_bad_input_obj_type(monitor)
        test_no_execute_key(monitor)
        test_bad_execute_key_type(monitor)
        test_bad_arguments_key_type(monitor)
        test_id_key(monitor)
        test_invalid_arg_key(monitor)

    def argument_checker_suite(monitor):
        """
        Check that QMP's argument checker is detecting all possible errors.

        We use a number of different commands to perform the checks, but the
        command used doesn't matter much as QMP performs argument checking
        _before_ calling the command.
        """
        # stop doesn't take arguments
        resp = monitor.cmd_qmp("stop", {"foo": 1})
        check_error_resp(resp, "InvalidParameter", {"name": "foo"})

        # required argument omitted
        resp = monitor.cmd_qmp("screendump")
        check_error_resp(resp, "MissingParameter", {"name": "filename"})

        # 'bar' is not a valid argument
        resp = monitor.cmd_qmp("screendump", {
            "filename": "outfile",
            "bar": "bar"
        })
        check_error_resp(resp, "InvalidParameter", {"name": "bar"})

        # test optional argument: 'force' is omitted, but it's optional, so
        # the handler has to be called. Test this happens by checking an
        # error that is generated by the handler itself.
        resp = monitor.cmd_qmp("eject", {"device": "foobar"})
        check_error_resp(resp, "DeviceNotFound")

        # filename argument must be a json-string
        for arg in [{}, [], 1, True]:
            resp = monitor.cmd_qmp("screendump", {"filename": arg})
            check_error_resp(resp, "InvalidParameterType", {
                "name": "filename",
                "expected": "string"
            })

        # force argument must be a json-bool
        for arg in [{}, [], 1, "foo"]:
            resp = monitor.cmd_qmp("eject", {"force": arg, "device": "foo"})
            check_error_resp(resp, "InvalidParameterType", {
                "name": "force",
                "expected": "bool"
            })

        # val argument must be a json-int
        for arg in [{}, [], True, "foo"]:
            resp = monitor.cmd_qmp("memsave", {
                "val": arg,
                "filename": "foo",
                "size": 10
            })
            check_error_resp(resp, "InvalidParameterType", {
                "name": "val",
                "expected": "int"
            })

        # value argument must be a json-number
        for arg in [{}, [], True, "foo"]:
            resp = monitor.cmd_qmp("migrate_set_speed", {"value": arg})
            check_error_resp(resp, "InvalidParameterType", {
                "name": "value",
                "expected": "number"
            })

        # qdev-type commands have their own argument checker, all QMP does
        # is to skip its checking and pass arguments through. Check this
        # works by providing invalid options to device_add and expecting
        # an error message from qdev
        resp = monitor.cmd_qmp("device_add", {"driver": "e1000", "foo": "bar"})
        check_error_resp(resp, "PropertyNotFound", {
            "device": "e1000",
            "property": "foo"
        })

    def unknown_commands_suite(monitor):
        """
        Check that QMP handles unknown commands correctly.
        """
        # We also call a HMP-only command, to be sure it will fail as expected
        for cmd in ["bar", "query-", "query-foo", "q", "help"]:
            resp = monitor.cmd_qmp(cmd)
            check_error_resp(resp, "CommandNotFound", {"name": cmd})

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    # Look for the first qmp monitor available, otherwise, fail the test
    qmp_monitor = None
    for m in vm.monitors:
        if isinstance(m, kvm_monitor.QMPMonitor):
            qmp_monitor = m
            # Without this break the loop kept overwriting qmp_monitor and
            # silently picked the *last* QMP monitor, contradicting the
            # comment above.
            break

    if qmp_monitor is None:
        raise error.TestError('Could not find a QMP monitor, aborting test')

    # Run all suites
    greeting_suite(qmp_monitor)
    input_object_suite(qmp_monitor)
    argument_checker_suite(qmp_monitor)
    unknown_commands_suite(qmp_monitor)
    json_parsing_errors_suite(qmp_monitor)

    # check if QMP is still alive
    if not qmp_monitor.is_responsive():
        raise error.TestFail('QMP monitor is not responsive after testing')
    def run_once(self):
        """
        Stress CRAS with two simultaneous audio streams and verify health.

        Records four boolean results in self._check -- no cras crashes at
        boot, active stream count matching the opened streams, a stable cras
        PID across the run, and no new cras crashes at the end -- and raises
        error.TestError describing every check that failed.
        """
        # Check for existing cras crashes which might occur during UI bring up.
        # TODO: (rohitbm) check if we need to reboot the DUT before the test
        #       start to verify cras crashes during boot.
        existing_crash_reports = self.collect_cras_crash()
        if len(existing_crash_reports) == 0:
            self._check['crashes_on_boot'] = True

        # Capturing cras pid before startig the test.
        cras_pid_1 = utils.get_oldest_pid_by_name('/usr/bin/cras')

        with chrome.Chrome(init_network_controller=True) as self._cr:
            # Push the 1st stream
            self.push_new_stream(self._cr.browser.tabs.New())

            # Capturing cras pid before opening a new set of audio streams.
            cras_pid_2 = utils.get_oldest_pid_by_name('/usr/bin/cras')

            # Push the 2nd stream
            self.push_new_stream(self._cr.browser.tabs.New())

            # Let's play audio for sometime to ensure that
            # long playback is good.
            time.sleep(10)

            total_tests = 2
            active_streams = cras_utils.get_active_stream_count()
            logging.debug(
                'Number of active streams after opening all tabs: %d.',
                active_streams)
            if active_streams >= total_tests:
                self._check['stream_activation'] = True

            # Capturing cras pid after opening all audio/video streams.
            cras_pid_3 = utils.get_oldest_pid_by_name('/usr/bin/cras')

            # Close all open audio streams.
            while total_tests > 0:
                self._cr.browser.tabs[total_tests].Close()
                total_tests -= 1
                time.sleep(1)
            active_streams = cras_utils.get_active_stream_count()
            logging.debug(
                'Number of active streams after closing all tabs: %d.',
                active_streams)

            # Capturing cras pid after closing all audio/stream streams.
            cras_pid_4 = utils.get_oldest_pid_by_name('/usr/bin/cras')

            # A PID change at any of the four sample points means cras
            # restarted (likely crashed) at some point during the test.
            if cras_pid_1 == cras_pid_2 == cras_pid_3 == cras_pid_4:
                self._check['cras_status'] = True

        # Only crashes that appeared during this run count against us.
        new_crash_reports = self.collect_cras_crash()
        new_reports = list(
            set(new_crash_reports) - set(existing_crash_reports))
        if len(new_reports) == 0:
            self._check['crashes_at_end'] = True

        # Build a combined failure message covering every failed check.
        # NOTE(review): the messages are numbered inconsistently ("1.", "2.",
        # then unnumbered) -- consider renumbering in a follow-up.
        err_msg = ''
        if self._check.values().count(False) > 0:
            if not self._check['crashes_on_boot']:
                err_msg = ('1. Found cras crashes on boot: %s.\n' %
                           existing_crash_reports)
            if not self._check['stream_activation']:
                err_msg += ('2. CRAS stream count is not matching with '
                            'number of streams.\n')
            if not self._check['cras_status']:
                err_msg += ('CRAS PID changed during the test. CRAS might be '
                            'crashing while adding/removing streams.\n')
            if not self._check['crashes_at_end']:
                err_msg += ('Found cras crashes at the end of the test : %s.' %
                            new_reports)
            raise error.TestError(err_msg)
 def _key_action(self, key, action_type):
     """Act on a single key through the EC keyboard-matrix console command.

     @param key: Key name; must be a key of KEYMATRIX.
     @param action_type: Action code forwarded to the EC 'kbpress' command
             (press/release value expected by the EC console).
     @raise error.TestError: if the key is not present in KEYMATRIX.
     """
     # PEP 8 idiom: 'key not in' instead of 'not key in'.
     if key not in KEYMATRIX:
         raise error.TestError('Unknown key: ' + key)
     row, col = KEYMATRIX[key]
     self.ec_command('kbpress %d %d %d' % (row, col, action_type))
    def run_once(self,
                 num_temp_sensor=0,
                 temp_sensor_to_test=None,
                 test_fan=False,
                 fan_rpm_error_margin=200,
                 test_battery=None,
                 test_lightbar=False,
                 fan_delay_secs=3):
        """Exercise basic EC features: fan control, temperature sensors,
        battery reporting and the lightbar.

        @param num_temp_sensor: Number of temperature sensors to check when
                temp_sensor_to_test is not given.
        @param temp_sensor_to_test: Explicit list of sensor indices to test.
        @param test_fan: Whether to exercise fan speed control.
        @param fan_rpm_error_margin: Allowed RPM deviation from the target.
        @param test_battery: Whether to query battery info; autodetected
                from the presence of a battery when None.
        @param test_lightbar: Whether to query the lightbar.
        @param fan_delay_secs: Seconds to wait for the fan to settle.
        @raise error.TestNAError: if the device has no Google EC.
        @raise error.TestError: on fan, temperature or lightbar failures.
        """
        ec = cros_ec.EC()

        # Not applicable on devices without a Google EC.
        if not cros_ec.has_ectool() or not ec.hello(ignore_status=True):
            raise error.TestNAError('No support for Google EC')

        if test_battery is None:
            test_battery = power_utils.has_battery()

        if test_fan:
            try:
                # Request an unreachably high RPM to push the fan to its
                # maximum, then confirm it actually spun up.
                ec.set_fanspeed(10000)
                time.sleep(fan_delay_secs)
                full_speed = ec.get_fanspeed()
                if full_speed == 0:
                    raise error.TestError('Unable to start fan')

                # Aim for half of the observed maximum and verify the fan
                # settles close to that target.
                half_speed = full_speed / 2
                ec.set_fanspeed(half_speed)
                time.sleep(fan_delay_secs)
                measured = ec.get_fanspeed()

                # Real fans rarely hit the target exactly; accept readings
                # within the configured error margin.
                if abs(measured - half_speed) > fan_rpm_error_margin:
                    raise error.TestError('Unable to set fan speed')
            finally:
                # Always hand control back to the EC's automatic fan mode.
                ec.auto_fan_ctrl()

        if temp_sensor_to_test is None:
            temp_sensor_to_test = list(range(num_temp_sensor))

        for sensor in temp_sensor_to_test:
            # The EC reports Kelvin; sanity-check the Celsius equivalent.
            celsius = ec.get_temperature(sensor) - 273
            if not 0 <= celsius <= 100:
                raise error.TestError(
                        'Abnormal temperature reading on sensor %d' % sensor)

        if test_battery:
            # Battery queries are best-effort: some ECs expose no battery
            # data, which is logged but not fatal.
            try:
                logging.info('Battery temperature %d K',
                             ec.get_temperature(name='Battery'))
            except cros_ec.ECError as err:
                logging.debug('ECError: %s', err)
                logging.warning('No battery temperature via EC.')

            try:
                if not ec.get_battery():
                    raise error.TestError('Battery communication failed')
            except cros_ec.ECError as err:
                logging.debug('ECError: %s', err)
                logging.warning('No battery info via EC.')

        if test_lightbar and not ec.get_lightbar():
            raise error.TestError('Lightbar communication failed')
Exemple #14
0
 def ec_hibernate(self):
     """Put the EC in hibernate and verify it stopped responding.

     @raise error.TestError: if the EC console still responds after the
             'hibernate' command was sent.
     """
     self.ec.send_command('hibernate')
     # A hibernating EC must not answer console polls; if it does, the
     # hibernate command did not take effect.
     if self.ec_is_up():
         raise error.TestError('Could not put the EC into hibernate')
Exemple #15
0
    def set_install_params(self, test, params):
        """
        Initializes class attributes, and retrieves KVM code.

        Depending on install_mode, the KVM sources come from a local
        directory ('localsrc'), a release tarball ('release'), a nightly
        snapshot tarball ('snapshot'), or a local tarball ('localtar').

        @param test: kvm test object
        @param params: Dictionary with test arguments
        @raise error.TestError: if a required parameter for the selected
                install mode is missing.
        """
        super(SourceDirInstaller, self).set_install_params(test, params)

        self.mod_install_dir = os.path.join(self.prefix, 'modules')
        self.installed_kmods = False  # it will be set to True in case we
        # installed our own modules

        srcdir = params.get("srcdir", None)
        self.path_to_roms = params.get("path_to_rom_images", None)

        if self.install_mode == 'localsrc':
            if srcdir is None:
                # Bug fix: the adjacent string literals were previously
                # concatenated without separating spaces, producing
                # "...specifiedbut no...on thecontrol file."
                raise error.TestError("Install from source directory "
                                      "specified but no source directory "
                                      "provided on the control file.")
            else:
                shutil.copytree(srcdir, self.srcdir)

        if self.install_mode == 'release':
            release_tag = params.get("release_tag")
            release_dir = params.get("release_dir")
            release_listing = params.get("release_listing")
            logging.info("Installing KVM from release tarball")
            if not release_tag:
                release_tag = kvm_utils.get_latest_kvm_release_tag(
                    release_listing)
            tarball = os.path.join(release_dir, 'kvm', release_tag,
                                   "kvm-%s.tar.gz" % release_tag)
            # Lazy %-style logging arguments instead of eager interpolation.
            logging.info("Retrieving release kvm-%s", release_tag)
            tarball = utils.unmap_url("/", tarball, "/tmp")

        elif self.install_mode == 'snapshot':
            logging.info("Installing KVM from snapshot")
            snapshot_dir = params.get("snapshot_dir")
            if not snapshot_dir:
                raise error.TestError("Snapshot dir not provided")
            snapshot_date = params.get("snapshot_date")
            if not snapshot_date:
                # Take yesterday's snapshot
                d = (datetime.date.today() -
                     datetime.timedelta(1)).strftime("%Y%m%d")
            else:
                d = snapshot_date
            tarball = os.path.join(snapshot_dir, "kvm-snapshot-%s.tar.gz" % d)
            logging.info("Retrieving kvm-snapshot-%s", d)
            tarball = utils.unmap_url("/", tarball, "/tmp")

        elif self.install_mode == 'localtar':
            tarball = params.get("tarball")
            if not tarball:
                raise error.TestError("KVM Tarball install specified but no"
                                      " tarball provided on control file.")
            logging.info("Installing KVM from a local tarball")
            # Bug fix: the original call logged a literal '%s' because no
            # argument was supplied; also reuse the value already fetched.
            logging.info("Using tarball %s", tarball)
            tarball = utils.unmap_url("/", tarball, "/tmp")

        if self.install_mode in ['release', 'snapshot', 'localtar']:
            utils.extract_tarball_to_dir(tarball, self.srcdir)

        if self.install_mode in ['release', 'snapshot', 'localtar', 'srcdir']:
            self.repo_type = kvm_utils.check_kvm_source_dir(self.srcdir)
            configure_script = os.path.join(self.srcdir, 'configure')
            self.configure_options = check_configure_options(configure_script)
Exemple #16
0
 def scenario_suspend_mobile_enabled(self, **kwargs):
     """Suspend/resume the DUT while the mobile device is enabled.

     Enables the mobile device, confirms a mobile service is visible,
     then performs a suspend/resume cycle (argument 20 -- presumably
     seconds; confirm against suspend_resume's signature).

     @param kwargs: unused here; accepted so all scenario_* methods
             share a uniform signature.
     @raise error.TestError: if no mobile service is available after
             enabling the device.
     """
     device = self.__get_mobile_device()
     self.enable_device(device, True)
     if not self.mobile_service_available():
         raise error.TestError('Unable to find mobile service.')
     self.suspend_resume(20)
Exemple #17
0
    def _apply_patches(self, src_dir, patches):
        """Download and apply a list of patches inside src_dir.

        @param src_dir: directory the patches are fetched into and applied
                from (becomes the current working directory).
        @param patches: list of patch URLs/paths; may be empty.
        """
        if not patches:
            return
        os.chdir(src_dir)
        for patch in patches:
            # Fetch each patch next to the sources it is applied to. The
            # original code always downloaded into the userspace srcdir,
            # which broke patching of the kernel and kmod trees.
            utils.get_file(
                patch, os.path.join(src_dir, os.path.basename(patch)))
            utils.system('patch -p1 %s' % os.path.basename(patch))

    def _pull_code(self):
        """
        Retrieves code from git repositories.

        Checks out the KVM userspace tree (mandatory) and optionally the
        kernel and kmod trees, applying any configured patches to each.

        @raise error.TestError: if the KVM userspace repository is not
                specified in the parameters.
        """
        params = self.params

        kernel_repo = params.get("git_repo")
        user_repo = params.get("user_git_repo")
        kmod_repo = params.get("kmod_repo")

        kernel_branch = params.get("kernel_branch", "master")
        user_branch = params.get("user_branch", "master")
        kmod_branch = params.get("kmod_branch", "master")

        kernel_lbranch = params.get("kernel_lbranch", "master")
        user_lbranch = params.get("user_lbranch", "master")
        kmod_lbranch = params.get("kmod_lbranch", "master")

        kernel_commit = params.get("kernel_commit", None)
        user_commit = params.get("user_commit", None)
        kmod_commit = params.get("kmod_commit", None)

        # HACK: eval() on config-supplied strings; acceptable for trusted
        # control files, but never feed untrusted input through these.
        kernel_patches = eval(params.get("kernel_patches", "[]"))
        user_patches = eval(params.get("user_patches", "[]"))
        # Bug fix: this previously read the "user_patches" key again, so
        # patches listed under "kmod_patches" were silently ignored.
        kmod_patches = eval(params.get("kmod_patches", "[]"))

        if not user_repo:
            message = "KVM user git repository path not specified"
            logging.error(message)
            raise error.TestError(message)

        userspace_srcdir = os.path.join(self.srcdir, "kvm_userspace")
        kvm_utils.get_git_branch(user_repo, user_branch, userspace_srcdir,
                                 user_commit, user_lbranch)
        self.userspace_srcdir = userspace_srcdir
        self._apply_patches(self.userspace_srcdir, user_patches)

        if kernel_repo:
            kernel_srcdir = os.path.join(self.srcdir, "kvm")
            kvm_utils.get_git_branch(kernel_repo, kernel_branch, kernel_srcdir,
                                     kernel_commit, kernel_lbranch)
            self.kernel_srcdir = kernel_srcdir
            self._apply_patches(self.kernel_srcdir, kernel_patches)
        else:
            self.kernel_srcdir = None

        if kmod_repo:
            kmod_srcdir = os.path.join(self.srcdir, "kvm_kmod")
            kvm_utils.get_git_branch(kmod_repo, kmod_branch, kmod_srcdir,
                                     kmod_commit, kmod_lbranch)
            self.kmod_srcdir = kmod_srcdir
            self._apply_patches(self.kmod_srcdir, kmod_patches)
        else:
            self.kmod_srcdir = None

        configure_script = os.path.join(self.userspace_srcdir, 'configure')
        self.configure_options = check_configure_options(configure_script)
    def run_once(self, host, client_autotest, repeat, network_debug):
        """Stress USB hotplug and lid-driven suspend/resume on the DUT.

        Establishes the set of USB peripherals that appear behind the
        servo hub, runs a client autotest, then repeatedly exercises
        unplug/plug and suspend/resume combinations, verifying the same
        peripherals re-enumerate each time.

        @param host: DUT host object with a servo board attached.
        @param client_autotest: client test to run before the stress loop.
        @param repeat: number of stress iterations.
        @param network_debug: unused in this body -- TODO confirm with
                callers before removing.
        @raise error.TestError: on a bad servo lid control or if no USB
                peripherals are detected behind the hub.
        """
        self.has_lid = True

        # Check if DUT has lid.
        if host.servo.get('lid_open') == 'not_applicable':
            self.has_lid = False
        else:
            # Check that the servo lid_open control actually works.
            host.servo.lid_open()
            if host.servo.get('lid_open') != 'yes':
                raise error.TestError('SERVO has a bad lid_open control')

        autotest_client = autotest.Autotest(host)
        diff_list = []
        off_list = []
        # The servo hubs come up as diffs in connected components. These
        # should be ignored for this test.
        servo_hardware_prefix = 'Standard Microsystems Corp.'
        self.is_suspended = False

        def strip_lsusb_output(lsusb_output):
            """Finds the external USB devices plugged

            @param lsusb_output: lsusb command output to parse

            @returns plugged_list: List of plugged usb devices names

            """
            items = lsusb_output.split('\n')
            named_list = []
            unnamed_device_count = 0
            for item in items:
                # lsusb lines have 6 fixed columns before the device name;
                # nameless devices get a generated placeholder so set
                # arithmetic still accounts for them.
                columns = item.split(' ')
                if len(columns) == 6 or len(' '.join(
                        columns[6:]).strip()) == 0:
                    logging.debug(
                        'Unnamed device located, adding generic name.')
                    name = 'Unnamed device %d' % unnamed_device_count
                    unnamed_device_count += 1
                else:
                    name = ' '.join(columns[6:]).strip()
                if not name.startswith(servo_hardware_prefix):
                    named_list.append(name)
            return named_list

        def set_hub_power(on=True):
            """Turns on or off the USB hub (dut_hub1_rst1).

            The hub is powered by de-asserting its reset line, so on=True
            maps to reset 'off' and vice versa. Returns None.

            @param on: To power on the servo-usb hub or not
            """
            reset = 'off'
            if not on:
                reset = 'on'
            host.servo.set('dut_hub1_rst1', reset)
            time.sleep(_WAIT_DELAY)

        def wait_to_detect(timeout=_LONG_TIMEOUT):
            """Waits till timeout for set of peripherals in lsusb output.

            @param timeout: timeout in seconds

            @raise error.TestFail: if timeout is reached

            """
            start_time = int(time.time())
            while True:
                connected = strip_lsusb_output(
                    host.run('lsusb').stdout.strip())
                # diff_list is rebound to a set below before this closure
                # ever runs, so issubset() is available here.
                if diff_list.issubset(connected):
                    break
                elif int(time.time()) - start_time > timeout:
                    raise error.TestFail('USB peripherals not detected: %s' %
                                         str(diff_list.difference(connected)))
                time.sleep(1)

        def test_suspend(plugged_before_suspended=False,
                         plugged_before_resume=False):
            """Close and open lid while different USB plug status.

            @param plugged_before_suspended: USB plugged before suspended
            @param plugged_before_resume: USB plugged after suspended


            @raise error.TestFail: if USB peripherals do not match expectations.

            """
            set_hub_power(plugged_before_suspended)

            # Suspend
            boot_id = host.get_boot_id()
            if self.has_lid:
                host.servo.lid_close()
            else:
                # Lidless DUTs suspend via a background thread so this
                # test can keep driving the servo meanwhile.
                thread = threading.Thread(target=host.suspend)
                thread.start()
            host.test_wait_for_sleep(_LONG_TIMEOUT)
            logging.debug(' --DUT suspended')
            self.is_suspended = True

            if plugged_before_resume is not plugged_before_suspended:
                set_hub_power(plugged_before_resume)

            # Resume
            if self.has_lid:
                host.servo.lid_open()
            else:
                host.servo.power_key(_WAKE_PRESS_IN_SEC)
            host.test_wait_for_resume(boot_id, _LONG_TIMEOUT)
            logging.debug(' --DUT resumed')
            self.is_suspended = False

            if not plugged_before_resume:
                time.sleep(_WAIT_DELAY)
                connected = strip_lsusb_output(
                    host.run('lsusb').stdout.strip())
                if connected != off_list:
                    raise error.TestFail('Devices were not removed on wake.')
            else:
                wait_to_detect(_LONG_TIMEOUT)

        def test_hotplug():
            """Testing unplug-plug and check for expected peripherals.

             @raise error.TestFail: if USB peripherals do not match expectations.

            """
            set_hub_power(False)
            set_hub_power(True)
            wait_to_detect(_LONG_TIMEOUT)

        def stress_external_usb():
            """Test procedures in one iteration."""

            # Unplug/plug
            test_hotplug()

            # Suspend/resume as unplugged
            test_suspend()

            # Plug/close_lid/unplug/open_lid
            test_suspend(plugged_before_suspended=True)

            # Unplug/close_lid/plug/open_lid
            test_suspend(plugged_before_resume=True)

            # Suspend/resume as plugged
            test_suspend(plugged_before_suspended=True,
                         plugged_before_resume=True)

        host.servo.switch_usbkey('dut')

        # There are some mice that need the data and power connection to both
        # be removed, otherwise they won't come back up.  This means that the
        # external devices should only use the usb connections labeled:
        # USB_KEY and DUT_HUB1_USB.
        set_hub_power(False)
        time.sleep(_WAIT_DELAY)
        off_list = strip_lsusb_output(host.run('lsusb').stdout.strip())
        set_hub_power(True)
        time.sleep(_WAIT_DELAY * 2)
        connected = strip_lsusb_output(host.run('lsusb').stdout.strip())
        # Devices present only while the hub is powered are the ones under
        # test; note this rebinds diff_list from a list to a set.
        diff_list = set(connected).difference(set(off_list))
        if len(diff_list) == 0:
            raise error.TestError('No connected devices were detected.  Make '
                                  'sure the devices are connected to USB_KEY '
                                  'and DUT_HUB1_USB on the servo board.')
        logging.debug('Connected devices list: %s', diff_list)

        autotest_client.run_test(client_autotest, exit_without_logout=True)
        for iteration in xrange(1, repeat + 1):
            logging.debug('---Iteration %d/%d' % (iteration, repeat))
            stress_external_usb()
Exemple #19
0
 def disable_loopback(self):
     """Disable device loopback, trying phy loopback then mac loopback.

     @raise error.TestError: if both loopback types fail to disable.
     """
     # Try 'phyint' loopback first, then fall back to 'mac'; a return
     # value > 0 indicates failure, so raise only when BOTH attempts
     # fail. (The original comment claimed mac-first, contradicting
     # the code.)
     if (self._set_loopback('phyint', 'disable') > 0 and
         self._set_loopback('mac', 'disable') > 0):
         raise error.TestError('Unable to disable loopback')
 def verify_enrollment(self, user_id):
     """Verifies enterprise enrollment using /home/.shadow config.

     @param user_id: enrolled user id expected to appear in the install
             attributes file.
     @raise error.TestError: if user_id is absent, i.e. the device is
             not enrolled / enterprise owned for that user.
     """
     with open('/home/.shadow/install_attributes.pb') as f:
         # PEP 8 idiom: 'not in' instead of 'not x in'.
         if user_id not in f.read():
             raise error.TestError('Device is not enrolled or '
                                   'enterprise owned.')
Exemple #21
0
 def is_enabled(self):
     """Return whether this component is enabled.

     Abstract hook: concrete subclasses must override this; the base
     implementation always raises to flag a missing override.
     """
     raise error.TestError('Undefined')
    def run_once(self,
                 host,
                 client_autotest,
                 action_sequence,
                 repeat,
                 usb_list=None,
                 usb_checks=None,
                 crash_check=False):
        """Run a comma-separated sequence of plug/suspend/login actions.

        Determines the set of USB peripherals behind the servo hub, then
        executes the requested action sequence 'repeat' times, checking
        device status after every step.

        @param host: DUT host object with a servo board attached.
        @param client_autotest: client test used by the LOGIN action.
        @param action_sequence: comma-separated actions, e.g.
                'PLUG,SUSPEND,UNPLUG,RESUME'; case-insensitive.
        @param repeat: number of times to repeat the whole sequence.
        @param usb_list: expected USB peripherals (used by checks).
        @param usb_checks: per-device check definitions.
        @param crash_check: whether to check for crash data.
        @raise error.TestError: if no USB devices are detected behind
                the hub.
        @raise error.TestFail: if any step recorded a failure reason.
        """
        self.client_autotest = client_autotest
        self.host = host
        self.autotest_client = autotest.Autotest(self.host)
        self.usb_list = usb_list
        self.usb_checks = usb_checks
        self.crash_check = crash_check

        self.suspend_status = False
        self.login_status = False
        self.fail_reasons = list()
        self.action_step = None

        self.host.servo.switch_usbkey('dut')
        self.host.servo.set('usb_mux_sel3', 'dut_sees_usbkey')
        time.sleep(_WAIT_DELAY)

        # Collect USB peripherals when unplugged
        self.set_hub_power(False)
        time.sleep(_WAIT_DELAY)
        off_list = self.getPluggedUsbDevices()

        # Collect USB peripherals when plugged
        self.set_hub_power(True)
        time.sleep(_WAIT_DELAY * 2)
        on_list = self.getPluggedUsbDevices()

        # Devices that appear only while the hub is powered are the ones
        # under test.
        self.diff_list = set(on_list).difference(set(off_list))
        if not self.diff_list:
            # Fail if no devices were detected after powering the hub.
            raise error.TestError('No connected devices were detected. Make '
                                  'sure the devices are connected to USB_KEY '
                                  'and DUT_HUB1_USB on the servo board.')
        logging.debug('Connected devices list: %s', self.diff_list)

        # NOTE(review): 'board' is never used below; the get_board() call
        # is kept in case it has required side effects -- confirm and
        # remove if not.
        board = host.get_board().split(':')[1]
        action_sequence = action_sequence.upper()
        actions = action_sequence.split(',')
        boot_id = 0
        self.remove_crash_data()

        for iteration in xrange(1, repeat + 1):
            step = 0
            for action in actions:
                step += 1
                action = action.strip()
                self.action_step = 'STEP %d.%d. %s' % (iteration, step, action)
                logging.info(self.action_step)

                if action == 'RESUME':
                    self.action_resume(boot_id)
                    time.sleep(_WAIT_DELAY)
                elif action == 'UNPLUG':
                    self.set_hub_power(False)
                elif action == 'PLUG':
                    self.set_hub_power(True)
                elif not self.suspend_status:
                    # The remaining actions are only valid while awake.
                    if action.startswith('LOGIN'):
                        if self.login_status:
                            logging.debug('Skipping login. Already logged in.')
                            continue
                        else:
                            self.action_login()
                            self.login_status = True
                    elif action == 'REBOOT':
                        self.host.reboot()
                        time.sleep(_WAIT_DELAY * 3)
                        self.login_status = False
                    elif action == 'SUSPEND':
                        boot_id = self.action_suspend()
                else:
                    logging.info('WRONG ACTION: %s .', self.action_step)

                self.check_status()

            if self.fail_reasons:
                raise error.TestFail('Failures reported: %s' %
                                     str(self.fail_reasons))
Exemple #23
0
 def disable(self):
     """Disable this component.

     Abstract hook: concrete subclasses must override this; the base
     implementation always raises to flag a missing override.
     """
     raise error.TestError('Undefined')
Exemple #24
0
    def _run_crasher_process(self,
                             username,
                             cause_crash=True,
                             consent=True,
                             crasher_path=None,
                             run_crasher=None,
                             expected_uid=None,
                             expected_gid=None,
                             expected_exit_code=None,
                             expected_reason=None):
        """Runs the crasher process.

        Will wait up to 10 seconds for crash_reporter to report the crash.
        crash_reporter_caught will be marked as true when the "Received crash
        notification message..." appears. While associated logs are likely to be
        available at this point, the function does not guarantee this.

        @param username: Unix user of the crasher process.
        @param cause_crash: Whether the crasher should crash.
        @param consent: Whether the user consents to crash reporting.
        @param crasher_path: Path to which the crasher should be copied before
                             execution. Relative to |_root_path|.
        @param run_crasher: A closure to override the default |crasher_command|
                            invocation. It should return a tuple describing the
                            process, where |pid| can be None if it should be
                            parsed from the |output|:

            def run_crasher(username, crasher_command):
                ...
                return (exit_code, output, pid)

        @param expected_uid: The uid the crash happens under.
        @param expected_gid: The gid the crash happens under.
        @param expected_exit_code:
        @param expected_reason:
            Expected information in crash_reporter log message.

        @returns:
          A dictionary with keys:
            returncode: return code of the crasher
            crashed: did the crasher return segv error code
            crash_reporter_caught: did crash_reporter catch a segv
            output: stderr output of the crasher process
        """
        if crasher_path is None:
            crasher_path = self._crasher_path
        else:
            # os.path.isabs() returns a bool (0 or 1), so slicing with it
            # strips one leading '/' from absolute paths before the join --
            # os.path.join would otherwise discard _root_path entirely.
            dest = os.path.join(self._root_path,
                                crasher_path[os.path.isabs(crasher_path):])

            utils.system('cp -a "%s" "%s"' % (self._crasher_path, dest))

        self.enable_crash_filtering(os.path.basename(crasher_path))

        crasher_command = []

        if username == 'root':
            if expected_exit_code is None:
                # Direct child killed by SIGSEGV: Popen reports the
                # negative signal number.
                expected_exit_code = -signal.SIGSEGV
        else:
            if expected_exit_code is None:
                # When run via 'su', the shell reports death-by-signal as
                # 128 + signal number instead.
                expected_exit_code = 128 + signal.SIGSEGV

            if not run_crasher:
                crasher_command.extend(['su', username, '-c'])

        crasher_command.append(crasher_path)
        basename = os.path.basename(crasher_path)
        if not cause_crash:
            crasher_command.append('--nocrash')
        self._set_consent(consent)

        logging.debug('Running crasher: %s', crasher_command)

        if run_crasher:
            (exit_code, output, pid) = run_crasher(username, crasher_command)

        else:
            crasher = subprocess.Popen(crasher_command,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)

            # The crasher writes its diagnostics (including pid=...) to
            # stderr; communicate() returns (stdout, stderr).
            output = crasher.communicate()[1]
            exit_code = crasher.returncode
            pid = None

        logging.debug('Crasher output:\n%s', output)

        if pid is None:
            # Get the PID from the output, since |crasher.pid| may be su's PID.
            match = re.search(r'pid=(\d+)', output)
            if not match:
                raise error.TestFail('Missing PID in crasher output')
            pid = int(match.group(1))

        if expected_uid is None:
            expected_uid = pwd.getpwnam(username).pw_uid

        if expected_gid is None:
            expected_gid = pwd.getpwnam(username).pw_gid

        if expected_reason is None:
            expected_reason = 'handling' if consent else 'ignoring - no consent'

        # 'sig 11' is SIGSEGV; this string must match crash_reporter's log
        # format exactly for the log search below to succeed.
        expected_message = (
            ('[%s] Received crash notification for %s[%d] sig 11, user %d '
             'group %d (%s)') % (self._expected_tag, basename, pid,
                                 expected_uid, expected_gid, expected_reason))

        # Wait until no crash_reporter is running.
        utils.poll_for_condition(
            lambda: utils.system('pgrep -f crash_reporter.*:%s' % basename,
                                 ignore_status=True) != 0,
            timeout=10,
            exception=error.TestError(
                'Timeout waiting for crash_reporter to finish: ' +
                self._log_reader.get_logs()))

        is_caught = False
        try:
            utils.poll_for_condition(
                lambda: self._log_reader.can_find(expected_message),
                timeout=5,
                desc='Logs contain crash_reporter message: ' +
                expected_message)
            is_caught = True
        except utils.TimeoutError:
            # Not finding the message is a valid outcome (e.g. no consent);
            # the caller inspects 'crash_reporter_caught'.
            pass

        result = {
            'crashed': exit_code == expected_exit_code,
            'crash_reporter_caught': is_caught,
            'output': output,
            'returncode': exit_code
        }
        logging.debug('Crasher process result: %s', result)
        return result
Exemple #25
0
def runtest(job,
            url,
            tag,
            args,
            dargs,
            local_namespace={},
            global_namespace={},
            before_test_hook=None,
            after_test_hook=None,
            before_iteration_hook=None,
            after_iteration_hook=None):
    """Locate, import, instantiate and execute a test, with optional hooks.

    @param job: the job object (client- or server-side) running this test.
    @param url: a plain test name, or a '.tar.bz2' URL of a test package to
            download and install first.
    @param tag: optional tag appended to the test's output directory name.
    @param args: positional arguments forwarded to the test's _exec().
    @param dargs: keyword arguments forwarded to the test's _exec(); the
            'master_testpath' key (if present) is popped and used to build
            the output subdirectory.
    @param local_namespace: local namespace dict for the dynamic import and
            instantiation below; copied, so the caller's dict (and this
            shared mutable default) is never modified.
    @param global_namespace: global namespace dict, handled like
            local_namespace.
    @param before_test_hook: callable invoked with the test instance before
            _exec() runs (skipped when job.fast is set).
    @param after_test_hook: callable invoked with the test instance after
            _exec(); skipped only when job.fast is set AND the test passed.
    @param before_iteration_hook: callable registered to run before each
            test iteration.
    @param after_iteration_hook: callable registered to run after each
            test iteration.

    @raises error.TestError: if a locally-named test cannot be found under
            either job.testdir or job.site_testdir.
    """
    local_namespace = local_namespace.copy()
    global_namespace = global_namespace.copy()
    # if this is not a plain test name then download and install the
    # specified test
    if url.endswith('.tar.bz2'):
        (testgroup, testname) = _installtest(job, url)
        bindir = os.path.join(job.testdir, 'download', testgroup, testname)
        importdir = os.path.join(job.testdir, 'download')
        modulename = '%s.%s' % (re.sub('/', '.', testgroup), testname)
        classname = '%s.%s' % (modulename, testname)
        path = testname
    else:
        # If the test is local, it may be under either testdir or site_testdir.
        # Tests in site_testdir override tests defined in testdir
        testname = path = url
        testgroup = ''
        # ':' in a local test name denotes a subdirectory separator.
        path = re.sub(':', '/', testname)
        modulename = os.path.basename(path)
        classname = '%s.%s' % (modulename, modulename)

        # Try installing the test package
        # The job object may be either a server side job or a client side job.
        # 'install_pkg' method will be present only if it's a client side job.
        if hasattr(job, 'install_pkg'):
            try:
                bindir = os.path.join(job.testdir, testname)
                job.install_pkg(testname, 'test', bindir)
            except error.PackageInstallError:
                # continue as a fall back mechanism and see if the test code
                # already exists on the machine
                pass

        # Search both test dirs; a later match (site_testdir) overrides an
        # earlier one (testdir).
        bindir = None
        for dir in [job.testdir, getattr(job, 'site_testdir', None)]:
            if dir is not None and os.path.exists(os.path.join(dir, path)):
                importdir = bindir = os.path.join(dir, path)
        if not bindir:
            raise error.TestError(testname + ': test does not exist')

    subdir = os.path.join(dargs.pop('master_testpath', ""), testname)
    outputdir = os.path.join(job.resultdir, subdir)
    if tag:
        outputdir += '.' + tag

    local_namespace['job'] = job
    local_namespace['bindir'] = bindir
    local_namespace['outputdir'] = outputdir

    # Temporarily extend sys.path so the dynamic import of the test module
    # resolves; always restore it, even if the import/instantiation fails.
    sys.path.insert(0, importdir)
    try:
        exec('import %s' % modulename, local_namespace, global_namespace)
        exec("mytest = %s(job, bindir, outputdir)" % classname,
             local_namespace, global_namespace)
    finally:
        sys.path.pop(0)

    # Run the test from inside its own output directory; restore the
    # original working directory afterwards in the finally below.
    pwd = os.getcwd()
    os.chdir(outputdir)

    try:
        mytest = global_namespace['mytest']
        mytest.success = False
        if not job.fast and before_test_hook:
            logging.info('Starting before_hook for %s', mytest.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/before_hook_duration'):
                before_test_hook(mytest)
            logging.info('before_hook completed')

        # we use the register iteration hooks methods to register the passed
        # in hooks
        if before_iteration_hook:
            mytest.register_before_iteration_hook(before_iteration_hook)
        if after_iteration_hook:
            mytest.register_after_iteration_hook(after_iteration_hook)
        mytest._exec(args, dargs)
        mytest.success = True
    finally:
        os.chdir(pwd)
        # The after-hook also runs on failure so cleanup/log collection
        # happens even when _exec raised.
        if after_test_hook and (not mytest.success or not job.fast):
            logging.info('Starting after_hook for %s', mytest.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/after_hook_duration'):
                after_test_hook(mytest)
            logging.info('after_hook completed')

        shutil.rmtree(mytest.tmpdir, ignore_errors=True)
Exemple #26
0
    def run_once(self, host, reboots):
        """Stress-test USB device hotplug across DUT reboots via servo.

        Captures the set of USB devices visible with the servo hub powered
        on vs. off, then reboots the DUT the requested number of times while
        a background stressor toggles hub power, verifying after each reboot
        that the same device set re-enumerates.

        @param host: DUT host object; must have a working servo attached.
        @param reboots: number of reboot iterations (string or int).

        @raises error.TestError: if no external USB devices are detected
                before the stress loop starts.
        @raises error.TestFail: if the device list after any reboot differs
                from the initial ("master") list.
        """
        reboots = int(reboots)
        self.client = host
        # The servo hubs come up as diffs in connected components.  These
        # should be ignored for this test.  It is a list so when servo next
        # is available it may have a differnet hub which can be appended.
        servo_hardware_list = ['Standard Microsystems Corp.']

        def strip_lsusb_output(lsusb_output):
            # Return lsusb device names (columns 7+ of each line), giving
            # generic placeholder names to unnamed devices and dropping the
            # servo hub hardware listed above.
            items = lsusb_output.split('\n')
            named_list = []
            unnamed_device_count = 0
            for item in items:
                columns = item.split(' ')
                if len(columns) == 6 or len(' '.join(
                        columns[6:]).strip()) == 0:
                    logging.info(
                        'Unnamed device located, adding generic name.')
                    name = 'Unnamed device %d' % unnamed_device_count
                    unnamed_device_count += 1
                else:
                    name = ' '.join(columns[6:]).strip()
                if name not in servo_hardware_list:
                    named_list.append(name)
            return named_list

        def set_hub_power(on=True, check_host_detection=False):
            # Toggle hub power via the servo reset line (reset asserted ==
            # hub off).  When check_host_detection is set, wait for devices
            # to (dis)appear and return the parsed lsusb device list.
            reset = 'off'
            if not on:
                reset = 'on'
            host.servo.set('dut_hub1_rst1', reset)
            if check_host_detection:
                time.sleep(_WAIT_DELAY)
                return strip_lsusb_output(host.run('lsusb').stdout.strip())

        def stress_hotplug():
            # Background stressor body: one quick off/on power cycle of the
            # hub.  Return values of set_hub_power are intentionally unused.
            # Devices need some time to come up and to be recognized.  However
            # this is a stress test so we want to move reasonably fast.
            time.sleep(2)
            removed = set_hub_power(False)
            time.sleep(1)
            connected = set_hub_power()

        host.servo.switch_usbkey('dut')
        host.servo.set('usb_mux_sel3', 'dut_sees_usbkey')

        # There are some mice that need the data and power connection to both
        # be removed, otherwise they won't come back up.  This means that the
        # external devices should only use the usb connections labeled:
        # USB_KEY and DUT_HUB1_USB.
        connected = set_hub_power(check_host_detection=True)
        off_list = set_hub_power(on=False, check_host_detection=True)
        # diff_list is the "master" set of devices that must re-enumerate
        # after every reboot.
        diff_list = set(connected).difference(set(off_list))
        if len(diff_list) == 0:
            raise error.TestError('No connected devices were detected.  Make '
                                  'sure the devices are connected to USB_KEY '
                                  'and DUT_HUB1_USB on the servo board.')
        logging.info('Connected devices list: %s' % diff_list)
        set_hub_power(True)

        # Some older boards do not support GBB flags; detect them so we can
        # use servo to accelerate booting through dev mode instead.
        lsb_release = host.run('cat /etc/lsb-release').stdout.split('\n')
        unsupported_gbb_boards = ['x86-mario', 'x86-alex', 'x86-zgb']
        skip_gbb = False
        for line in lsb_release:
            m = re.match(r'^CHROMEOS_RELEASE_BOARD=(.+)$', line)
            if m and m.group(1) in unsupported_gbb_boards:
                skip_gbb = True
                break

        logging.info('Rebooting the device %d time(s)' % reboots)
        # NOTE(review): xrange is Python 2-only.
        for i in xrange(reboots):
            # We want fast boot past the dev screen
            if not skip_gbb:
                host.run('/usr/share/vboot/bin/set_gbb_flags.sh 0x01')
            stressor = stress.ControlledStressor(stress_hotplug)
            logging.info('Reboot iteration %d of %d' % (i + 1, reboots))
            if skip_gbb:
                # For devices that do not support gbb we have servo
                # accelerate booting through dev mode.
                host.servo.get_power_state_controller().reset()
                host.servo.power_short_press()
                time.sleep(servo.Servo.BOOT_DELAY)
                host.servo.ctrl_d()
                stressor.start()
                host.wait_up(timeout=120)
            else:
                stressor.start()
                self.client.reboot()
            logging.info('Reboot complete, shutting down stressor.')
            stressor.stop()
            # Re-derive the connected-device set and compare to the master.
            connected_now = set_hub_power(check_host_detection=True)
            diff_now = set(connected_now).difference(set(off_list))
            if diff_list != diff_now:
                raise error.TestFail('The list of connected items does not '
                                     'match the master list.\nMaster: %s\n'
                                     'Current: %s' % (diff_list, diff_now))
            logging.info('Connected devices for iteration %d: %s' %
                         (i, diff_now))
Exemple #27
0
    def run(self, job, batch_size=10, tries=10, capturer_hostname=None,
            conn_worker=None, work_client_hostname=None,
            disabled_sysinfo=False):
        """Executes Chaos test.

        Locks a packet capturer and a webdriver VM, then iterates over
        batches of APs: powers them, configures them, scans for their
        networks and runs the connection test against each, releasing APs
        and cleaning up the VM/tunnel at the end.

        @param job: an Autotest job object.
        @param batch_size: an integer, max number of APs to lock in one batch.
        @param tries: an integer, number of iterations to run per AP.
        @param capturer_hostname: a string or None, hostname or IP of capturer.
        @param conn_worker: ConnectionWorkerAbstract or None, to run extra
                            work after successful connection.
        @param work_client_hostname: a string or None, hostname of work client
        @param disabled_sysinfo: a bool, disable collection of logs from DUT.


        @raises TestError: Issues locking VM webdriver instance
        """

        lock_manager = host_lock_manager.HostLockManager()
        webdriver_master = hosts.SSHHost(MASTERNAME, user='******')
        host_prefix = self._host.hostname.split('-')[0]
        with host_lock_manager.HostsLockedBy(lock_manager):
            capture_host = utils.allocate_packet_capturer(
                    lock_manager, hostname=capturer_hostname,
                    prefix=host_prefix)
            # Cleanup and reboot packet capturer before the test.
            utils.sanitize_client(capture_host)
            capturer = site_linux_system.LinuxSystem(capture_host, {},
                                                     'packet_capturer')

            # Run iw scan and abort if more than allowed number of APs are up.
            iw_command = iw_runner.IwRunner(capture_host)
            start_time = time.time()
            logging.info('Performing a scan with a max timeout of 30 seconds.')
            capture_interface = 'wlan0'
            capturer_info = capture_host.run('cat /etc/lsb-release',
                                             ignore_status=True, timeout=5).stdout
            if 'whirlwind' in capturer_info:
                # Use the dual band aux radio for scanning networks.
                capture_interface = 'wlan2'
            while time.time() - start_time <= ap_constants.MAX_SCAN_TIMEOUT:
                networks = iw_command.scan(capture_interface)
                if networks is None:
                    # BUGFIX: the original compared the elapsed time with
                    # '==', which is essentially never exactly true for
                    # floats, so this timeout error could never fire.
                    if (time.time() - start_time >=
                            ap_constants.MAX_SCAN_TIMEOUT):
                        raise error.TestError(
                            'Packet capturer is not responding to scans. Check'
                            'device and re-run test')
                    continue
                elif len(networks) < ap_constants.MAX_SSID_COUNT:
                    break
                elif len(networks) >= ap_constants.MAX_SSID_COUNT:
                    raise error.TestError(
                        'Probably someone is already running a '
                        'chaos test?!')

            if conn_worker is not None:
                work_client_machine = utils.allocate_packet_capturer(
                        lock_manager, hostname=work_client_hostname)
                conn_worker.prepare_work_client(work_client_machine)

            # Lock VM. If on, power off; always power on. Then create a tunnel.
            webdriver_instance = utils.allocate_webdriver_instance(lock_manager)

            if utils.is_VM_running(webdriver_master, webdriver_instance):
                logging.info('VM %s was on; powering off for a clean instance',
                             webdriver_instance)
                utils.power_off_VM(webdriver_master, webdriver_instance)
                logging.info('Allow VM time to gracefully shut down')
                time.sleep(5)

            logging.info('Starting up VM %s', webdriver_instance)
            utils.power_on_VM(webdriver_master, webdriver_instance)
            logging.info('Allow VM time to power on before creating a tunnel.')
            time.sleep(30)

            # BUGFIX: webdriver_tunnel was only assigned in the in-lab
            # branch below but is read unconditionally during cleanup,
            # raising NameError on the non-lab path; initialize it here.
            webdriver_tunnel = None
            if not client_utils.host_is_in_lab_zone(webdriver_instance.hostname):
                self._ap_spec._webdriver_hostname = webdriver_instance.hostname
            else:
                # If in the lab then port forwarding must be done so webdriver
                # connection will be over localhost.
                self._ap_spec._webdriver_hostname = 'localhost'
                webdriver_tunnel = webdriver_instance.create_ssh_tunnel(
                                                WEBDRIVER_PORT, WEBDRIVER_PORT)
                logging.info('Wait for tunnel to be created.')
                for i in range(3):
                    time.sleep(10)
                    results = client_utils.run('lsof -i:%s' % WEBDRIVER_PORT,
                                             ignore_status=True)
                    if results:
                        break
                if not results:
                    # BUGFIX: error.TestError does not lazily %-format a
                    # second positional argument; interpolate explicitly.
                    raise error.TestError(
                            'Unable to listen to WEBDRIVER_PORT: %s' % results)

            batch_locker = ap_batch_locker.ApBatchLocker(
                    lock_manager, self._ap_spec,
                    ap_test_type=ap_constants.AP_TEST_TYPE_CHAOS)

            while batch_locker.has_more_aps():
                # Work around for CrOS devices only:crbug.com/358716
                utils.sanitize_client(self._host)
                healthy_dut = True

                with contextlib.closing(wifi_client.WiFiClient(
                    hosts.create_host(
                            {
                                    'hostname' : self._host.hostname,
                                    'afe_host' : self._host._afe_host,
                                    'host_info_store':
                                            self._host.host_info_store,
                            },
                            host_class=self._host.__class__,
                    ),
                    './debug',
                    False,
                )) as client:

                    aps = batch_locker.get_ap_batch(batch_size=batch_size)
                    if not aps:
                        logging.info('No more APs to test.')
                        break

                    # Power down all of the APs because some can get grumpy
                    # if they are configured several times and remain on.
                    # User the cartridge to down group power downs and
                    # configurations.
                    utils.power_down_aps(aps, self._broken_pdus)
                    utils.configure_aps(aps, self._ap_spec, self._broken_pdus)

                    aps = utils.filter_quarantined_and_config_failed_aps(aps,
                            batch_locker, job, self._broken_pdus)

                    for ap in aps:
                        # http://crbug.com/306687
                        if ap.ssid is None:
                            logging.error('The SSID was not set for the AP:%s',
                                          ap)

                        healthy_dut = utils.is_dut_healthy(client, ap)

                        if not healthy_dut:
                            logging.error('DUT is not healthy, rebooting.')
                            batch_locker.unlock_and_reclaim_aps()
                            break

                        networks = utils.return_available_networks(
                                ap, capturer, job, self._ap_spec)

                        if networks is None:
                            # If scan returned no networks, iw scan failed.
                            # Reboot the packet capturer device and
                            # reconfigure the capturer.
                            batch_locker.unlock_and_reclaim_ap(ap.host_name)
                            logging.error('Packet capture is not healthy, '
                                          'rebooting.')
                            capturer.host.reboot()
                            capturer = site_linux_system.LinuxSystem(
                                           capture_host, {},'packet_capturer')
                            continue
                        if networks == list():
                            # Packet capturer did not find the SSID in scan or
                            # there was a security mismatch.
                            utils.release_ap(ap, batch_locker,
                                             self._broken_pdus)
                            continue

                        assoc_params = ap.get_association_parameters()

                        if not utils.is_conn_worker_healthy(
                                conn_worker, ap, assoc_params, job):
                            utils.release_ap(
                                    ap, batch_locker, self._broken_pdus)
                            continue

                        name = ap.name
                        kernel_ver = self._host.get_kernel_ver()
                        firmware_ver = utils.get_firmware_ver(self._host)
                        if not firmware_ver:
                            firmware_ver = "Unknown"

                        debug_dict = {'+++PARSE DATA+++': '+++PARSE DATA+++',
                                      'SSID': ap._ssid,
                                      'DUT': client.wifi_mac,
                                      'AP Info': ap.name,
                                      'kernel_version': kernel_ver,
                                      'wifi_firmware_version': firmware_ver}
                        debug_string = pprint.pformat(debug_dict)

                        logging.info('Waiting %d seconds for the AP dhcp '
                                     'server', ap.dhcp_delay)
                        time.sleep(ap.dhcp_delay)

                        result = job.run_test(self._test,
                                     capturer=capturer,
                                     capturer_frequency=networks[0].frequency,
                                     capturer_ht_type=networks[0].ht,
                                     host=self._host,
                                     assoc_params=assoc_params,
                                     client=client,
                                     tries=tries,
                                     debug_info=debug_string,
                                     # Copy all logs from the system
                                     disabled_sysinfo=disabled_sysinfo,
                                     conn_worker=conn_worker,
                                     tag=ap.ssid if conn_worker is None else
                                         '%s.%s' % (conn_worker.name, ap.ssid))

                        utils.release_ap(ap, batch_locker, self._broken_pdus)

                        if conn_worker is not None:
                            conn_worker.cleanup()

                    if not healthy_dut:
                        continue

                batch_locker.unlock_aps()

            # Tear down the SSH tunnel only if one was actually created.
            if webdriver_tunnel:
                webdriver_instance.disconnect_ssh_tunnel(webdriver_tunnel,
                                                         WEBDRIVER_PORT)
                webdriver_instance.close()
            capturer.close()
            logging.info('Powering off VM %s', webdriver_instance)
            utils.power_off_VM(webdriver_master, webdriver_instance)
            lock_manager.unlock(webdriver_instance.hostname)

            if self._broken_pdus:
                logging.info('PDU is down!!!\nThe following PDUs are down:\n')
                pprint.pprint(self._broken_pdus)

            factory = ap_configurator_factory.APConfiguratorFactory(
                    ap_constants.AP_TEST_TYPE_CHAOS)
            factory.turn_off_all_routers(self._broken_pdus)
Exemple #28
0
        return self.is_cgroup(pid, self.root)

    def set_cgroup(self, pid, pwd):
        """
        Sets cgroup membership by writing the pid into the cgroup's
        'tasks' file.
        @param pid: pid of the process
        @param pwd: cgroup directory, or an int index into self.cgroups
        @raise error.TestError: when the tasks file cannot be written, or
                when the membership check afterwards indicates failure.
        """
        if isinstance(pwd, int):
            pwd = self.cgroups[pwd]
        try:
            # Context manager guarantees the tasks file is closed even on a
            # failed write (the original leaked the file object).
            with open(pwd + '/tasks', 'w') as tasks_file:
                tasks_file.write(str(pid))
        except Exception as details:
            raise error.TestError("cg.set_cgroup(): %s" % details)
        # A truthy is_cgroup() result is treated as "pid is not in this
        # cgroup", i.e. the move failed.
        if self.is_cgroup(pid, pwd):
            raise error.TestError("cg.set_cgroup(): Setting %d pid into %s "
                                  "cgroup failed" % (pid, pwd))

    def set_root_cgroup(self, pid):
        """
        Resets the cgroup membership (sets to root)
        @param pid: pid of the process
        @return: the result of set_cgroup(), which has no return value
                (None) on success and raises error.TestError on failure.
                NOTE(review): the former claim of returning 0 on PASS did
                not match set_cgroup()'s implementation.
        """
        return self.set_cgroup(pid, self.root)

    def get_property(self, prop, pwd=None):
        """
        Gets the property value
        @param prop: property name (file)
        @param pwd: cgroup directory
        @return: [] values or None when FAILED
Exemple #29
0
    def _run_badblocks(self, dev, sector_size, tmout):
        """
        Runs badblocks (destructive write+read test) on a device and
        records pass/fail counts and the longest run time.

        @param dev: device/partition path handed to badblocks.
        @param sector_size: block size passed via badblocks' -b option.
        @param tmout: timeout in seconds after which self._timeout() is
                invoked with the badblocks process.
        @raise error.TestError: when badblocks exits with a nonzero code.
        """

        # Run badblocks on the selected partition, with parameters:
        # -s = show progress
        # -v = verbose (print error count)
        # -w = destructive write+read test
        # -b = block size (set equal to sector size)
        # BUGFIX: the original passed '-d' (read delay factor) here,
        # contradicting the comment above; '-b' is badblocks' block-size
        # option and matches the documented intent.
        argv = [self._BADBLOCKS, '-svw', '-b', str(sector_size), dev]
        logging.info('Running: %s', ' '.join(argv))
        badblocks_proc = subprocess.Popen(
                argv,
                shell=False,
                stderr=subprocess.STDOUT, # Combine stderr with stdout.
                stdout=subprocess.PIPE)

        # Start timeout timer thread; cancel it even if communicate()
        # raises, so no stray timer fires later.
        t = threading.Timer(tmout, self._timeout, [badblocks_proc])
        t.start()
        try:
            # Get badblocks output.
            stdout, _ = badblocks_proc.communicate()
        finally:
            # Stop timer if badblocks has finished (or failed).
            t.cancel()

        # Check badblocks exit status.
        # BUGFIX: error.TestError does not lazily %-format a second
        # positional argument; interpolate explicitly.
        if badblocks_proc.returncode != 0:
            raise error.TestError('badblocks returned with code: %s' %
                                  badblocks_proc.returncode)

        # Parse and log badblocks output.
        logging.info('badblocks output:')
        lines = stdout.split('\n')
        del lines[-1] # Remove blank line at end.
        logging.info(lines[0])
        logging.info(lines[1])
        # Log the progress of badblocks (line 2 onwards, minus last line).
        for line in lines[2:-1]:
            # badblocks redraws progress using backspaces; replace them
            # with newlines so the log stays readable.
            line = re.sub(r'[\b]+', '\n', line)
            # Log test pattern info.
            pattern_info = line[:line.find(':') + 1]
            logging.info('%s', pattern_info)
            sublines = line[line.find(':') + 2:].split('\n')
            for subline in sublines:
                logging.info('%s', subline)
        # Log result (last line).
        logging.info(lines[-1])

        # Get run time in seconds from the mm:ss token on the second-to-last
        # line of badblocks' output.
        min_sec = re.match(r'(\w+):(\w+)', lines[-2].split()[-4])
        runtime = int(min_sec.group(1)) * 60 + int(min_sec.group(2))

        # Update longest run time.
        if self._longest_runtime < runtime:
            self._longest_runtime = runtime

        # Check badblocks result.
        result = lines[-1].strip()
        if result != self._EXPECTED_BADBLOCKS_OUTPUT:
            self._fail_count += 1
            return
        self._pass_count += 1
Exemple #30
0
        "gro": (ro_callback, ("rx", ), ("lro", )),
        "lro": (rx_callback, (), ("gro", )),
    }
    ethtool_save_params()
    success = True
    try:
        for f_type in supported_features:
            callback = test_matrix[f_type][0]
            for i in test_matrix[f_type][2]:
                if not ethtool_set(i, "off"):
                    logging.error("Fail to disable %s", i)
                    success = False
            for i in [f for f in test_matrix[f_type][1]] + [f_type]:
                if not ethtool_set(i, "on"):
                    logging.error("Fail to enable %s", i)
                    success = False
            if not callback():
                raise error.TestFail("Test failed, %s: on", f_type)

            if not ethtool_set(f_type, "off"):
                logging.error("Fail to disable %s", f_type)
                success = False
            if not callback(status="off"):
                raise error.TestFail("Test failed, %s: off", f_type)
        if not success:
            raise error.TestError("Enable/disable offload function fail")
    finally:
        ethtool_restore_params()
        session.close()
        session2.close()