Example #1
0
def collect_test_data(args, snapshot, os_name, test_name):
    """Collect output data for a single test case.

    @param args: cmdline arguments
    @param snapshot: instantiated snapshot
    @param os_name: name of the operating system under test
    @param test_name: name or path of test to run
    @return_value: tuple of results and fail count
    """
    # default outcome: no results, one failure (replaced once collection runs)
    outcome = ({}, 1)

    # resolve test name and load its configuration
    test_name = config.path_to_name(test_name)
    test_config = config.load_test_config(test_name)
    user_data = test_config['cloud_config']
    test_scripts = test_config['collect_scripts']
    test_output_dir = os.sep.join(
        (args.data_dir, snapshot.platform_name, os_name, test_name))

    # a disabled test is skipped and counts as zero failures
    if not test_config.get('enabled', False):
        LOG.warning('test config %s is not enabled, skipping', test_name)
        return ({}, 0)

    # skip (with a warning) when the image lacks a required feature flag
    req_features = test_config.get('required_features', [])
    unsupported = [f for f in req_features if f not in snapshot.features]
    if unsupported:
        LOG.warning('test config %s requires features not supported by image, '
                    'skipping.\nrequired features: %s\nsupported features: %s',
                    test_name, req_features, snapshot.features)
        return ({}, 0)

    # apply any platform-level user data overrides for this test case
    overrides = snapshot.config.get('user_data_overrides', {})
    if overrides:
        LOG.debug('updating user data for collect with: %s', overrides)
        user_data = util.update_user_data(user_data, overrides)

    # build a factory for the (not yet started) test instance
    instance_factory = partial(platforms.get_instance, snapshot, user_data,
                               block=True, start=False, use_desc=test_name)
    component = PlatformComponent(instance_factory)

    LOG.info('collecting test data for test: %s', test_name)
    with component as instance:
        boot = partial(instance.start, wait=True, wait_for_cloud_init=True)
        start_call = partial(run_single, 'boot instance', boot)

        collect_calls = []
        for script_name, script in test_scripts.items():
            collect_calls.append(partial(
                run_single, 'script {}'.format(script_name),
                partial(collect_script, instance, test_output_dir,
                        script, script_name)))

        console_log = partial(
            run_single, 'collect console',
            partial(collect_console, instance, test_output_dir))

        outcome = run_stage('collect for test: {}'.format(test_name),
                            [start_call] + collect_calls + [console_log])

    return outcome
Example #2
0
def collect_test_data(args, snapshot, os_name, test_name):
    """Collect output data for a single test case.

    @param args: cmdline arguments
    @param snapshot: instantiated snapshot
    @param os_name: name of the operating system under test
    @param test_name: name or path of test to run
    @return_value: tuple of results and fail count
    """
    # default outcome: no results, one failure (replaced once collection runs)
    outcome = ({}, 1)

    # resolve test name and load its configuration
    test_name = config.path_to_name(test_name)
    test_config = config.load_test_config(test_name)
    user_data = test_config['cloud_config']
    test_scripts = test_config['collect_scripts']
    test_output_dir = os.sep.join(
        (args.data_dir, snapshot.platform_name, os_name, test_name))

    # per-test boot timeout wins when it is an integer; otherwise fall back
    # to the snapshot-level timeout
    configured_timeout = test_config.get('boot_timeout')
    if isinstance(configured_timeout, int):
        boot_timeout = configured_timeout
    else:
        boot_timeout = snapshot.config.get('timeout')

    # a disabled test is skipped and counts as zero failures
    if not test_config.get('enabled', False):
        LOG.warning('test config %s is not enabled, skipping', test_name)
        return ({}, 0)

    # build a factory for the (not yet started) test instance
    instance_factory = partial(instances.get_instance, snapshot, user_data,
                               block=True, start=False, use_desc=test_name)
    component = PlatformComponent(instance_factory)

    LOG.info('collecting test data for test: %s', test_name)
    with component as instance:
        boot = partial(instance.start, wait=True, wait_time=boot_timeout)
        start_call = partial(run_single, 'boot instance', boot)

        collect_calls = []
        for script_name, script in test_scripts.items():
            collect_calls.append(partial(
                run_single, 'script {}'.format(script_name),
                partial(collect_script, instance, test_output_dir,
                        script, script_name)))

        outcome = run_stage('collect for test: {}'.format(test_name),
                            [start_call] + collect_calls)

    return outcome
Example #3
0
def verify_data(data_dir, platform, os_name, tests):
    """Verify that collected test data satisfies each test's assertions.

    @param data_dir: top level directory for all tests
    @param platform: the platform name for this test data (e.g. lxd)
    @param os_name: the operating system under test (xenial, artful, etc.)
    @param tests: list of test names
    @return_value: {<test_name>: {passed: True/False, failures: []}}
    """
    base_dir = os.sep.join((data_dir, platform, os_name))
    runner = unittest2.TextTestRunner(verbosity=util.current_verbosity())
    res = {}
    for test_name in tests:
        LOG.debug('verifying test data for %s', test_name)

        # load the cloud config used by this test
        test_conf = config.load_test_config(test_name)
        test_module = config.name_to_module(test_name)
        cloud_conf = test_conf['cloud_config']

        # read every collected script output into memory (as bytes)
        data = {'platform': platform, 'os_name': os_name}
        test_dir = os.path.join(base_dir, test_name)
        for script_name in os.listdir(test_dir):
            script_path = os.path.join(test_dir, script_name)
            with open(script_path, 'rb') as fp:
                data[script_name] = fp.read()

        # run the test suite against the collected data
        suite = testcases.get_suite(test_module, data, cloud_conf)
        suite_results = runner.run(suite)

        failures = []
        for test_class, trace in suite_results.failures:
            base_type = type(test_class).__base__
            failures.append({
                'module': base_type.__module__,
                'class': base_type.__name__,
                'function': str(test_class).split()[0],
                'error': trace.splitlines()[-1],
                'traceback': trace,
            })
        res[test_name] = {
            'passed': suite_results.wasSuccessful(),
            'failures': failures,
        }

        for failure in failures:
            LOG.warning('test case: %s failed %s.%s with: %s', test_name,
                        failure['class'], failure['function'],
                        failure['error'])

    return res
Example #4
0
def verify_data(data_dir, platform, os_name, tests):
    """Verify that collected test data satisfies each test's assertions.

    @param data_dir: top level directory for all tests
    @param platform: the platform name for this test data (e.g. lxd)
    @param os_name: the operating system under test (xenial, artful, etc.)
    @param tests: list of test names
    @return_value: {<test_name>: {passed: True/False, failures: []}}
    """

    def _failure_entry(test_class, trace):
        # summarize one suite failure; the base class carries the real
        # test module/class names
        base_type = type(test_class).__base__
        return {
            'module': base_type.__module__,
            'class': base_type.__name__,
            'function': str(test_class).split()[0],
            'error': trace.splitlines()[-1],
            'traceback': trace,
        }

    base_dir = os.sep.join((data_dir, platform, os_name))
    runner = unittest2.TextTestRunner(verbosity=util.current_verbosity())
    res = {}
    for test_name in tests:
        LOG.debug('verifying test data for %s', test_name)

        # load the cloud config used by this test
        test_conf = config.load_test_config(test_name)
        test_module = config.name_to_module(test_name)
        cloud_conf = test_conf['cloud_config']

        # read every collected script output into memory (as bytes)
        data = {'platform': platform, 'os_name': os_name}
        test_dir = os.path.join(base_dir, test_name)
        for script_name in os.listdir(test_dir):
            with open(os.path.join(test_dir, script_name), 'rb') as fp:
                data[script_name] = fp.read()

        # run the test suite against the collected data
        suite = testcases.get_suite(test_module, data, cloud_conf)
        suite_results = runner.run(suite)
        res[test_name] = {
            'passed': suite_results.wasSuccessful(),
            'failures': [_failure_entry(tc, trace)
                         for tc, trace in suite_results.failures],
        }

        for failure in res[test_name]['failures']:
            LOG.warning('test case: %s failed %s.%s with: %s',
                        test_name, failure['class'], failure['function'],
                        failure['error'])

    return res
Example #5
0
def verify_data(base_dir, tests):
    """Verify that collected test data satisfies each test's assertions.

    base_dir: base directory for data
    tests: list of test names
    return_value: {<test_name>: {passed: True/False, failures: []}}
    """
    runner = unittest.TextTestRunner(verbosity=util.current_verbosity())
    res = {}
    for test_name in tests:
        LOG.debug('verifying test data for %s', test_name)

        # load the cloud config used by this test
        test_conf = config.load_test_config(test_name)
        test_module = config.name_to_module(test_name)
        cloud_conf = test_conf['cloud_config']

        # read every collected script output into memory (as text)
        data = {}
        test_dir = os.path.join(base_dir, test_name)
        for script_name in os.listdir(test_dir):
            script_path = os.path.join(test_dir, script_name)
            with open(script_path, 'r') as fp:
                data[script_name] = fp.read()

        # run the test suite against the collected data
        suite = testcases.get_suite(test_module, data, cloud_conf)
        suite_results = runner.run(suite)

        failures = []
        for test_class, trace in suite_results.failures:
            base_type = type(test_class).__base__
            failures.append({
                'module': base_type.__module__,
                'class': base_type.__name__,
                'function': str(test_class).split()[0],
                'error': trace.splitlines()[-1],
                'traceback': trace,
            })
        res[test_name] = {
            'passed': suite_results.wasSuccessful(),
            'failures': failures,
        }

        for failure in failures:
            LOG.warning('test case: %s failed %s.%s with: %s', test_name,
                        failure['class'], failure['function'],
                        failure['error'])

    return res
Example #6
0
def collect_test_data(args, snapshot, os_name, test_name):
    """Collect output data for a single test case.

    @param args: cmdline arguments
    @param snapshot: instantiated snapshot
    @param os_name: name of the operating system under test
    @param test_name: name or path of test to run
    @return_value: tuple of results and fail count
    """
    # default outcome: no results, one failure (replaced once collection runs)
    outcome = ({}, 1)

    # resolve test name (keeping the original for module lookup) and load
    # its configuration
    test_name_in = test_name
    test_name = config.path_to_name(test_name)
    test_config = config.load_test_config(test_name)
    user_data = test_config['cloud_config']
    test_scripts = test_config['collect_scripts']
    test_output_dir = os.sep.join(
        (args.data_dir, snapshot.platform_name, os_name, test_name))

    # a disabled test is skipped and counts as zero failures
    if not test_config.get('enabled', False):
        LOG.warning('test config %s is not enabled, skipping', test_name)
        return ({}, 0)

    # let the test class itself decide whether it applies to this run
    test_class = get_test_class(
        config.name_to_module(test_name_in),
        test_data={'platform': snapshot.platform_name, 'os_name': os_name},
        test_conf=test_config['cloud_config'])
    try:
        test_class.maybeSkipTest()
    except base.SkipTest as s:
        LOG.warning('skipping test config %s: %s', test_name, s)
        return ({}, 0)

    # skip (with a warning) when the image lacks a required feature flag
    req_features = test_config.get('required_features', [])
    unsupported = [f for f in req_features if f not in snapshot.features]
    if unsupported:
        LOG.warning('test config %s requires features not supported by image, '
                    'skipping.\nrequired features: %s\nsupported features: %s',
                    test_name, req_features, snapshot.features)
        return ({}, 0)

    # apply any platform-level user data overrides for this test case
    overrides = snapshot.config.get('user_data_overrides', {})
    if overrides:
        LOG.debug('updating user data for collect with: %s', overrides)
        user_data = util.update_user_data(user_data, overrides)

    # build a factory for the (not yet started) test instance
    instance_factory = partial(platforms.get_instance, snapshot, user_data,
                               block=True, start=False, use_desc=test_name)
    component = PlatformComponent(
        instance_factory, preserve_instance=args.preserve_instance)

    LOG.info('collecting test data for test: %s', test_name)
    with component as instance:
        boot = partial(instance.start, wait=True, wait_for_cloud_init=True)
        start_call = partial(run_single, 'boot instance', boot)

        collect_calls = []
        for script_name, script in test_scripts.items():
            collect_calls.append(partial(
                run_single, 'script {}'.format(script_name),
                partial(collect_script, instance, test_output_dir,
                        script, script_name)))

        outcome = run_stage('collect for test: {}'.format(test_name),
                            [start_call] + collect_calls)

        # grab the console log after shutdown so the full boot is captured
        instance.shutdown()
        collect_console(instance, test_output_dir)

    return outcome