Example #1
    def _do_discovery(self, argv, Loader=None):
        """The discovery process is complicated by the fact that:

        * different test suites live under different directories
        * some test suites may not be available (CUDA)
        * some tests may have to be run serially, even in the presence of the '-m' flag."""

        from numba import cuda
        join = os.path.join
        loader = unittest.TestLoader() if Loader is None else Loader()
        topdir = os.path.abspath(join(os.path.dirname(__file__), '../..'))
        base_tests = loader.discover(join(topdir, 'numba/tests'), 'test*.py',
                                     topdir)
        cuda_tests = [
            loader.discover(join(topdir, 'numba/cuda/tests/nocuda'),
                            'test*.py', topdir)
        ]
        if cuda.is_available():
            gpus = cuda.list_devices()
            if gpus and gpus[0].compute_capability >= (2, 0):
                cuda_tests.append(
                    loader.discover(join(topdir, 'numba/cuda/tests/cudadrv'),
                                    'test*.py', topdir))
                cuda_tests.append(
                    loader.discover(join(topdir, 'numba/cuda/tests/cudapy'),
                                    'test*.py', topdir))
            else:
                print("skipped CUDA tests because GPU CC < 2.0")
        else:
            print("skipped CUDA tests")
        self.test = suite.TestSuite(tests=(base_tests,
                                           SerialSuite(cuda_tests)))
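SerialSuite here is a numba-internal wrapper (presumably forcing its member tests to run serially even under the parallel '-m' flag). Setting that aside, the composition pattern itself needs only the stdlib; a minimal sketch with hypothetical paths:

import unittest

# Hypothetical directories; substitute real test trees.
loader = unittest.TestLoader()
base = loader.discover('tests/base', pattern='test*.py')
optional = loader.discover('tests/optional', pattern='test*.py')

# TestSuite accepts an iterable of tests/suites, exactly as in the
# suite.TestSuite(tests=(base_tests, SerialSuite(cuda_tests))) call above.
combined = unittest.TestSuite(tests=(base, optional))
unittest.TextTestRunner(verbosity=2).run(combined)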
Example #2
def build_tests(path,
                loader,
                host=None,
                port=8001,
                intercept=None,
                test_loader_name=None,
                fixture_module=None,
                response_handlers=None,
                prefix=''):
    """Read YAML files from a directory to create tests.

    Each YAML file represents an ordered sequence of HTTP requests.

    :param path: The directory where yaml files are located.
    :param loader: The TestLoader.
    :param host: The host to test against. Do not use with ``intercept``.
    :param port: The port to test against. Used with ``host``.
    :param intercept: WSGI app factory for wsgi-intercept.
    :param test_loader_name: Base name for test classes. Rarely used.
    :param fixture_module: Python module containing fixture classes.
    :param response_handlers: ResponseHandler classes.
    :type response_handlers: List of ResponseHandler classes.
    :param prefix: A URL prefix for all URLs that are not fully qualified.
    :rtype: TestSuite containing multiple TestSuites (one for each YAML file).
    """

    # Exit immediately if we have no host to access, either via a real host
    # or an intercept.
    if not (bool(host) ^ bool(intercept)):
        raise AssertionError('must specify exactly one of host or intercept')

    if test_loader_name is None:
        test_loader_name = inspect.stack()[1]
        test_loader_name = os.path.splitext(
            os.path.basename(test_loader_name[1]))[0]

    # Initialize response handlers.
    response_handlers = response_handlers or []
    for handler in RESPONSE_HANDLERS + response_handlers:
        handler(case.HTTPTestCase)

    top_suite = suite.TestSuite()
    for test_file in glob.iglob('%s/*.yaml' % path):
        if intercept:
            host = str(uuid.uuid4())
        suite_dict = load_yaml(test_file)
        test_base_name = '%s_%s' % (
            test_loader_name, os.path.splitext(os.path.basename(test_file))[0])
        file_suite = test_suite_from_dict(loader, test_base_name, suite_dict,
                                          path, host, port, fixture_module,
                                          intercept, prefix)
        top_suite.addTest(file_suite)
    return top_suite
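A hedged usage sketch: functions like this are typically wired into unittest's load_tests protocol so that discovery picks up the YAML-driven suite. The 'gabbits' directory name is an assumption for illustration, and build_tests is assumed importable in scope:

import os

def load_tests(loader, tests, pattern):
    # Hypothetical directory of YAML test files next to this module;
    # host/port point at a live service to test against.
    test_dir = os.path.join(os.path.dirname(__file__), 'gabbits')
    return build_tests(test_dir, loader, host='localhost', port=8001)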
Example #3
def load_tests(modules, loader):
    if modules:
        log.info("Executing modules: {0}".format(modules))
        module_suites = []
        for mod_name in modules:
            # Test names like cephfs.test_auto_repair
            module_suites.append(loader.loadTestsFromName(mod_name))
        log.info("Loaded: {0}".format(list(module_suites)))
        return suite.TestSuite(module_suites)
    else:
        log.info("Executing all cephfs tests")
        return loader.discover(
            os.path.join(os.path.dirname(os.path.abspath(__file__)), "cephfs"))
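A possible driver for this loader, assuming the surrounding module's log is configured and the named test modules are importable:

import unittest

loader = unittest.TestLoader()
# Run two named modules; an empty list would trigger full discovery.
tests = load_tests(["tasks.cephfs.test_auto_repair",
                    "tasks.cephfs.test_sessionmap"], loader)
unittest.TextTestRunner(verbosity=2).run(tests)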
Example #4
def build_tests(path,
                loader,
                host=None,
                port=8001,
                intercept=None,
                test_loader_name=None,
                fixture_module=None,
                response_handlers=None):
    """Read YAML files from a directory to create tests.

    Each YAML file represents an ordered sequence of HTTP requests.
    """

    if not (bool(host) ^ bool(intercept)):
        raise AssertionError('must specify exactly one of host or intercept')

    response_handlers = response_handlers or []
    top_suite = suite.TestSuite()

    if test_loader_name is None:
        test_loader_name = inspect.stack()[1]
        test_loader_name = os.path.splitext(
            os.path.basename(test_loader_name[1]))[0]

    yaml_file_glob = '%s/*.yaml' % path

    # Initialize the extensions for response handling.
    for handler in RESPONSE_HANDLERS + response_handlers:
        handler(case.HTTPTestCase)

    # Build a sub-suite for each YAML file found under the path (the
    # resulting top suite is empty if no files match).
    for test_file in glob.iglob(yaml_file_glob):
        if intercept:
            host = str(uuid.uuid4())
        test_yaml = load_yaml(test_file)
        test_name = '%s_%s' % (
            test_loader_name, os.path.splitext(os.path.basename(test_file))[0])
        file_suite = test_suite_from_yaml(loader, test_name, test_yaml, path,
                                          host, port, fixture_module,
                                          intercept)
        top_suite.addTest(file_suite)
    return top_suite
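The bool(host) ^ bool(intercept) guard enforces that exactly one of the two is supplied. A tiny self-contained check of that truth table:

app = object()  # stand-in for a WSGI app factory

for host, intercept in [(None, None), ('example.org', None),
                        (None, app), ('example.org', app)]:
    # Valid only when exactly one of host/intercept is truthy.
    print(host, intercept, '->', bool(host) ^ bool(intercept))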
Example #5
def task(ctx, config):
    """
    Run the CephFS test cases.

    Run everything in tasks/cephfs/test_*.py:

    ::

        tasks:
          - install:
          - ceph:
          - ceph-fuse:
          - cephfs_test_runner:

    `modules` argument allows running only some specific modules:

    ::

        tasks:
            ...
          - cephfs_test_runner:
              modules:
                - tasks.cephfs.test_sessionmap
                - tasks.cephfs.test_auto_repair

    By default, any cases that can't be run on the current cluster configuration
    will generate a failure.  When the optional `fail_on_skip` argument is set
    to false, any tests that can't be run on the current configuration will
    simply be skipped:

    ::

        tasks:
            ...
          - cephfs_test_runner:
              fail_on_skip: false

    """

    ceph_cluster = CephCluster(ctx)

    if len(list(misc.all_roles_of_type(ctx.cluster, 'mds'))):
        mds_cluster = MDSCluster(ctx)
        fs = Filesystem(ctx)
    else:
        mds_cluster = None
        fs = None

    if len(list(misc.all_roles_of_type(ctx.cluster, 'mgr'))):
        mgr_cluster = MgrCluster(ctx)
    else:
        mgr_cluster = None

    # Mount objects, sorted by ID
    if hasattr(ctx, 'mounts'):
        mounts = [
            v for k, v in sorted(ctx.mounts.items(),
                                 key=lambda mount: mount[0])
        ]
    else:
        # The test configuration has a filesystem but no fuse/kclient mounts
        mounts = []

    decorating_loader = DecoratingLoader({
        "ctx": ctx,
        "mounts": mounts,
        "fs": fs,
        "ceph_cluster": ceph_cluster,
        "mds_cluster": mds_cluster,
        "mgr_cluster": mgr_cluster,
    })

    fail_on_skip = config.get('fail_on_skip', True)

    # Put useful things onto ctx for interactive debugging
    ctx.fs = fs
    ctx.mds_cluster = mds_cluster
    ctx.mgr_cluster = mgr_cluster

    # Depending on config, either load specific modules, or scan for modules
    if config and 'modules' in config and config['modules']:
        module_suites = []
        for mod_name in config['modules']:
            # Test names like cephfs.test_auto_repair
            module_suites.append(decorating_loader.loadTestsFromName(mod_name))
        overall_suite = suite.TestSuite(module_suites)
    else:
        # Default, run all tests
        overall_suite = decorating_loader.discover(
            os.path.join(os.path.dirname(os.path.abspath(__file__)),
                         "cephfs/"))

    if ctx.config.get("interactive-on-error", False):
        InteractiveFailureResult.ctx = ctx
        result_class = InteractiveFailureResult
    else:
        result_class = unittest.TextTestResult

    class LoggingResult(result_class):
        def startTest(self, test):
            log.info("Starting test: {0}".format(self.getDescription(test)))
            return super(LoggingResult, self).startTest(test)

        def addSkip(self, test, reason):
            if fail_on_skip:
                # Don't just call addFailure because that requires a traceback
                self.failures.append((test, reason))
            else:
                super(LoggingResult, self).addSkip(test, reason)

    # Execute!
    result = unittest.TextTestRunner(stream=LogStream(),
                                     resultclass=LoggingResult,
                                     verbosity=2,
                                     failfast=True).run(overall_suite)

    if not result.wasSuccessful():
        result.printErrors()  # duplicate output at end for convenience

        bad_tests = []
        for test, error in result.errors:
            bad_tests.append(str(test))
        for test, failure in result.failures:
            bad_tests.append(str(test))

        raise RuntimeError("Test failure: {0}".format(", ".join(bad_tests)))

    yield
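The fail_on_skip branch above records a skip directly into result.failures because addFailure expects an exc_info traceback tuple. A standalone sketch of that result-class pattern (the test case is hypothetical):

import unittest

class SkipIsFailureResult(unittest.TextTestResult):
    def addSkip(self, test, reason):
        # Append directly: addFailure would require a traceback.
        self.failures.append((test, reason))

class Demo(unittest.TestCase):
    @unittest.skip("cluster feature unavailable")
    def test_feature(self):
        pass

suite_ = unittest.defaultTestLoader.loadTestsFromTestCase(Demo)
result = unittest.TextTestRunner(resultclass=SkipIsFailureResult,
                                 verbosity=2).run(suite_)
print(result.wasSuccessful())  # False: the skip counted as a failure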
Example #6
    def __runner_run(self, item):
        # Start running the tests and collect the result
        test_suites = suite.TestSuite(item)
        runner = TextTestRunner(verbosity=1)
        result = runner.run(test_suites)
        self.all_result.append(result)
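A minimal standalone equivalent of the method above, assuming item is an iterable of tests (the test case is hypothetical):

import unittest
from unittest import TextTestRunner

class Demo(unittest.TestCase):
    def test_ok(self):
        self.assertTrue(True)

item = [unittest.defaultTestLoader.loadTestsFromTestCase(Demo)]
test_suites = unittest.TestSuite(item)
result = TextTestRunner(verbosity=1).run(test_suites)
print(result.testsRun)  # 1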
Example #7
def exec_test():
    # Help developers by stopping up-front if their tree isn't built enough for all the
    # tools that the tests might want to use (add more here if needed)
    require_binaries = [
        "ceph-dencoder", "cephfs-journal-tool", "cephfs-data-scan",
        "cephfs-table-tool", "ceph-fuse", "rados"
    ]
    missing_binaries = [
        b for b in require_binaries
        if not os.path.exists(os.path.join(BIN_PREFIX, b))
    ]
    if missing_binaries:
        log.error("Some ceph binaries missing, please build them: {0}".format(
            " ".join(missing_binaries)))
        sys.exit(-1)

    test_dir = tempfile.mkdtemp()

    # Create as many of these as the biggest test requires
    clients = ["0", "1", "2", "3"]

    remote = LocalRemote()

    # Tolerate no MDSs or clients running at start
    ps_txt = remote.run(args=["ps", "-u" +
                              str(os.getuid())]).stdout.getvalue().strip()
    lines = ps_txt.split("\n")[1:]

    for line in lines:
        if 'ceph-fuse' in line or 'ceph-mds' in line:
            pid = int(line.split()[0])
            log.warn("Killing stray process {0}".format(line))
            os.kill(pid, signal.SIGKILL)

    class LocalCluster(object):
        def __init__(self, rolename="placeholder"):
            self.remotes = {remote: [rolename]}

        def only(self, requested):
            return self.__class__(rolename=requested)

    teuth_config['test_path'] = test_dir

    class LocalContext(object):
        def __init__(self):
            self.config = {}
            self.teuthology_config = teuth_config
            self.cluster = LocalCluster()
            self.daemons = DaemonGroup()

            # Shove some LocalDaemons into the ctx.daemons DaemonGroup instance so that any
            # tests that want to look these up via ctx can do so.
            # Inspect ceph.conf to see what roles exist
            for conf_line in open("ceph.conf").readlines():
                for svc_type in ["mon", "osd", "mds", "mgr"]:
                    if svc_type not in self.daemons.daemons:
                        self.daemons.daemons[svc_type] = {}
                    match = re.match(r"^\[{0}\.(.+)\]$".format(svc_type),
                                     conf_line)
                    if match:
                        svc_id = match.group(1)
                        self.daemons.daemons[svc_type][svc_id] = LocalDaemon(
                            svc_type, svc_id)

        def __del__(self):
            shutil.rmtree(self.teuthology_config['test_path'])

    ctx = LocalContext()

    mounts = []
    for client_id in clients:
        # Populate client keyring (it sucks to use client.admin for test clients
        # because it's awkward to find the logs later)
        client_name = "client.{0}".format(client_id)

        if client_name not in open("./keyring").read():
            p = remote.run(args=[
                os.path.join(BIN_PREFIX,
                             "ceph"), "auth", "get-or-create", client_name,
                "osd", "allow rw", "mds", "allow", "mon", "allow r"
            ])

            open("./keyring", "a").write(p.stdout.getvalue())

        mount = LocalFuseMount(test_dir, client_id)
        mounts.append(mount)
        if mount.is_mounted():
            log.warn("unmounting {0}".format(mount.mountpoint))
            mount.umount_wait()
        else:
            if os.path.exists(mount.mountpoint):
                os.rmdir(mount.mountpoint)
    filesystem = LocalFilesystem(ctx)
    ceph_cluster = LocalCephCluster(ctx)
    mds_cluster = LocalMDSCluster(ctx)
    mgr_cluster = LocalMgrCluster(ctx)

    from tasks.cephfs_test_runner import DecoratingLoader

    class LogStream(object):
        def __init__(self):
            self.buffer = ""

        def write(self, data):
            self.buffer += data
            if "\n" in self.buffer:
                lines = self.buffer.split("\n")
                for line in lines[:-1]:
                    log.info(line)
                self.buffer = lines[-1]

        def flush(self):
            pass

    decorating_loader = DecoratingLoader({
        "ctx": ctx,
        "mounts": mounts,
        "ceph_cluster": ceph_cluster,
        "fs": filesystem,
        "mds_cluster": mds_cluster,
        "mgr_cluster": mgr_cluster,
    })

    # For the benefit of polling tests like test_full -- in teuthology land we set this
    # in a .yaml, here it's just a hardcoded thing for the developer's pleasure.
    remote.run(args=[
        os.path.join(BIN_PREFIX, "ceph"), "tell", "osd.*", "injectargs",
        "--osd-mon-report-interval-max", "5"
    ])
    filesystem.set_ceph_conf("osd", "osd_mon_report_interval_max", "5")

    # Vstart defaults to two segments, which very easily gets a "behind on trimming" health warning
    # from normal IO latency.  Increase it for running tests.
    filesystem.set_ceph_conf("mds", "mds log max segments", "10")

    # Make sure the filesystem created in tests has uid/gid that will let us talk to
    # it after mounting it (without having to become root).  Set in 'global' not just 'mds'
    # so that cephfs-data-scan will pick it up too.
    filesystem.set_ceph_conf("global", "mds root ino uid", "%s" % os.getuid())
    filesystem.set_ceph_conf("global", "mds root ino gid", "%s" % os.getgid())

    # Monkeypatch get_package_version to avoid having to work out what kind of distro we're on
    def _get_package_version(remote, pkg_name):
        # Used in cephfs tests to find fuse version.  Your development workstation *does* have >=2.9, right?
        return "2.9"

    import teuthology.packaging
    teuthology.packaging.get_package_version = _get_package_version

    def enumerate_methods(s):
        for t in s._tests:
            if isinstance(t, suite.BaseTestSuite):
                for sub in enumerate_methods(t):
                    yield sub
            else:
                yield s, t

    interactive_on_error = False

    args = sys.argv[1:]
    flags = [a for a in args if a.startswith("-")]
    modules = [a for a in args if not a.startswith("-")]
    for f in flags:
        if f == "--interactive":
            interactive_on_error = True
        else:
            log.error("Unknown option '{0}'".format(f))
            sys.exit(-1)

    if modules:
        log.info("Executing modules: {0}".format(modules))
        module_suites = []
        for mod_name in modules:
            # Test names like cephfs.test_auto_repair
            module_suites.append(decorating_loader.loadTestsFromName(mod_name))
        log.info("Loaded: {0}".format(list(module_suites)))
        overall_suite = suite.TestSuite(module_suites)
    else:
        log.info("Excuting all tests")
        overall_suite = decorating_loader.discover(
            os.path.dirname(os.path.abspath(__file__)))

    # Filter out tests that don't lend themselves to this local runner.
    victims = []
    for case, method in enumerate_methods(overall_suite):
        fn = getattr(method, method._testMethodName)

        drop_test = False

        if hasattr(fn, 'is_for_teuthology') and getattr(
                fn, 'is_for_teuthology') is True:
            drop_test = True
            log.warn("Dropping test because long running: ".format(
                method.id()))

        if getattr(fn, "needs_trimming", False) is True:
            drop_test = (os.getuid() != 0)
            log.warn("Dropping test because client trim unavailable: ".format(
                method.id()))

        if drop_test:
            # Don't drop the test if it was explicitly requested in arguments
            is_named = False
            for named in modules:
                if named.endswith(method.id()):
                    is_named = True
                    break

            if not is_named:
                victims.append((case, method))

    log.info(
        "Disabling {0} tests because of is_for_teuthology or needs_trimming".
        format(len(victims)))
    for s, method in victims:
        s._tests.remove(method)

    if interactive_on_error:
        result_class = InteractiveFailureResult
    else:
        result_class = unittest.TextTestResult
    fail_on_skip = False

    class LoggingResult(result_class):
        def startTest(self, test):
            log.info("Starting test: {0}".format(self.getDescription(test)))
            test.started_at = datetime.datetime.utcnow()
            return super(LoggingResult, self).startTest(test)

        def stopTest(self, test):
            log.info("Stopped test: {0} in {1}s".format(
                self.getDescription(test), (datetime.datetime.utcnow() -
                                            test.started_at).total_seconds()))

        def addSkip(self, test, reason):
            if fail_on_skip:
                # Don't just call addFailure because that requires a traceback
                self.failures.append((test, reason))
            else:
                super(LoggingResult, self).addSkip(test, reason)

    # Execute!
    result = unittest.TextTestRunner(stream=LogStream(),
                                     resultclass=LoggingResult,
                                     verbosity=2,
                                     failfast=True).run(overall_suite)

    if not result.wasSuccessful():
        result.printErrors()  # duplicate output at end for convenience

        bad_tests = []
        for test, error in result.errors:
            bad_tests.append(str(test))
        for test, failure in result.failures:
            bad_tests.append(str(test))

        sys.exit(-1)
    else:
        sys.exit(0)
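enumerate_methods above recursively yields (suite, test) pairs so that tests can later be removed from their parent suite. A standalone sketch of the same traversal (the test class is hypothetical); stdlib suites are iterable, so isinstance(t, unittest.TestSuite) works here in place of the private s._tests:

import unittest

def enumerate_methods(s):
    # Yield (parent_suite, test) pairs from an arbitrarily nested suite.
    for t in s:
        if isinstance(t, unittest.TestSuite):
            for sub in enumerate_methods(t):
                yield sub
        else:
            yield s, t

class Demo(unittest.TestCase):
    def test_a(self):
        pass

    def test_b(self):
        pass

nested = unittest.TestSuite([
    unittest.defaultTestLoader.loadTestsFromTestCase(Demo)])
for parent, test in enumerate_methods(nested):
    print(test.id())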
Example #8
def build_tests(path,
                loader,
                host=None,
                port=8001,
                intercept=None,
                test_loader_name=None,
                fixture_module=None,
                response_handlers=None,
                content_handlers=None,
                prefix='',
                require_ssl=False,
                cert_validate=True,
                url=None,
                inner_fixtures=None,
                verbose=False,
                use_prior_test=True,
                safe_yaml=True):
    """Read YAML files from a directory to create tests.

    Each YAML file represents a list of HTTP requests.

    :param path: The directory where yaml files are located.
    :param loader: The TestLoader.
    :param host: The host to test against. Do not use with ``intercept``.
    :param port: The port to test against. Used with ``host``.
    :param intercept: WSGI app factory for wsgi-intercept.
    :param test_loader_name: Base name for test classes. Use this to align the
                             naming of the tests with other tests in a system.
    :param fixture_module: Python module containing fixture classes.
    :param response_handlers: :class:`~gabbi.handlers.ResponseHandler` classes.
    :type response_handlers: List of ResponseHandler classes.
    :param content_handlers: ContentHandler classes.
    :type content_handlers: List of ContentHandler classes.
    :param prefix: A URL prefix for all URLs that are not fully qualified.
    :param url: A full URL to test against. Replaces host, port and prefix.
    :param require_ssl: If ``True``, make all tests default to using SSL.
    :param inner_fixtures: A list of ``Fixtures`` to use with each
                           individual test request.
    :type inner_fixtures: List of classes with setUp and cleanUp methods to
                          be used as fixtures.
    :param verbose: If ``True`` or ``'all'``, make tests verbose by default;
                    ``'headers'`` and ``'body'`` are also accepted.
    :param use_prior_test: If ``True``, use the prior test to create an
                           ordered sequence of tests.
    :param safe_yaml: If ``True``, recognize only standard YAML tags and not
                      Python objects.
    :param cert_validate: If ``False``, the SSL server certificate will not
                          be validated, even if provided (sets
                          cert_reqs=CERT_NONE on the Http object).
    :rtype: TestSuite containing multiple TestSuites (one for each YAML file).
    """

    # If url is being used, reset host, port and prefix.
    if url:
        host, port, prefix, force_ssl = utils.host_info_from_target(url)
        if force_ssl and not require_ssl:
            require_ssl = force_ssl

    # Exit immediately if we have no host to access, either via a real host
    # or an intercept.
    if not ((host is not None) ^ bool(intercept)):
        raise AssertionError(
            'must specify exactly one of host or url, or intercept')

    # If the client has not provided a name to use as our base,
    # create one so that tests are effectively namespaced.
    if test_loader_name is None:
        all_test_base_name = inspect.stack()[1]
        all_test_base_name = os.path.splitext(
            os.path.basename(all_test_base_name[1]))[0]
    else:
        all_test_base_name = None

    # Initialize response and content handlers. This is effectively
    # duplication of effort but not results. This allows for
    # backwards compatibility for existing callers.
    response_handlers = response_handlers or []
    content_handlers = content_handlers or []
    handler_objects = []
    for handler in (content_handlers + response_handlers +
                    handlers.RESPONSE_HANDLERS):
        handler_objects.append(handler())

    top_suite = suite.TestSuite()
    for test_file in glob.iglob('%s/*.yaml' % path):
        if '_' in os.path.basename(test_file):
            warnings.warn(
                exception.GabbiSyntaxWarning(
                    "'_' in test filename %s. This can break suite grouping." %
                    test_file))
        if intercept:
            host = str(uuid.uuid4())
        suite_dict = utils.load_yaml(yaml_file=test_file, safe=safe_yaml)
        test_base_name = os.path.splitext(os.path.basename(test_file))[0]
        if all_test_base_name:
            test_base_name = '%s_%s' % (all_test_base_name, test_base_name)

        if require_ssl:
            if 'defaults' in suite_dict:
                suite_dict['defaults']['ssl'] = True
            else:
                suite_dict['defaults'] = {'ssl': True}

        if verbose in (True, 'all', 'headers', 'body'):
            if 'defaults' in suite_dict:
                suite_dict['defaults']['verbose'] = verbose
            else:
                suite_dict['defaults'] = {'verbose': verbose}

        if not cert_validate:
            if 'defaults' in suite_dict:
                suite_dict['defaults']['cert_validate'] = False
            else:
                suite_dict['defaults'] = {'cert_validate': False}

        if not use_prior_test:
            if 'defaults' in suite_dict:
                suite_dict['defaults']['use_prior_test'] = use_prior_test
            else:
                suite_dict['defaults'] = {'use_prior_test': use_prior_test}

        file_suite = suitemaker.test_suite_from_dict(
            loader,
            test_base_name,
            suite_dict,
            path,
            host,
            port,
            fixture_module,
            intercept,
            prefix=prefix,
            test_loader_name=test_loader_name,
            handlers=handler_objects,
            inner_fixtures=inner_fixtures)
        top_suite.addTest(file_suite)
    return top_suite
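The four near-identical defaults blocks above follow one pattern: create suite_dict['defaults'] if absent, then set a key. A behavior-preserving sketch of the same logic using dict.setdefault (values below are stand-ins; setdefault always creates the key, which is harmless here):

suite_dict = {}  # stand-in for a loaded YAML document
require_ssl, verbose = True, 'headers'
cert_validate, use_prior_test = False, False

defaults = suite_dict.setdefault('defaults', {})
if require_ssl:
    defaults['ssl'] = True
if verbose in (True, 'all', 'headers', 'body'):
    defaults['verbose'] = verbose
if not cert_validate:
    defaults['cert_validate'] = False
if not use_prior_test:
    defaults['use_prior_test'] = use_prior_test

print(suite_dict['defaults'])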