Example #1
def normalize_create_args(args):
    """
    normalize CREATE arguments
    args: parsed args
    return_value: updated args, or None if errors occurred
    """
    # ensure valid name for new test
    if len(args.name.split('/')) != 2:
        LOG.error('invalid test name: %s', args.name)
        return None
    if os.path.exists(config.name_to_path(args.name)):
        msg = 'test: {} already exists'.format(args.name)
        if args.force:
            LOG.warning('%s but ignoring due to --force', msg)
        else:
            LOG.error(msg)
            return None

    # ensure test config valid if specified
    if isinstance(args.config, str) and len(args.config) == 0:
        LOG.error('test config cannot be empty if specified')
        return None

    # ensure description valid if specified
    if (isinstance(args.description, str)
            and (len(args.description) > 70 or len(args.description) == 0)):
        LOG.error('test description must be between 1 and 70 characters')
        return None

    return args
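
# Usage sketch, added for illustration and not part of the original module:
# the attribute accesses above imply `args` is an argparse.Namespace carrying
# name, force, config and description; the values below are hypothetical.
import argparse

example_args = argparse.Namespace(
    name='modules/example_test',  # exactly two '/'-separated parts
    force=False,                  # allow overwriting an existing test
    config=None,                  # optional test config contents
    description=None)             # optional 1-70 character description
if normalize_create_args(example_args) is None:
    raise SystemExit('invalid CREATE arguments')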
Example #2
def collect_test_data(args, snapshot, os_name, test_name):
    """Collect data for test case.

    @param args: cmdline arguments
    @param snapshot: instantiated snapshot
    @param os_name: name of operating system under test
    @param test_name: name or path of test to run
    @return_value: tuple of results and fail count
    """
    res = ({}, 1)

    # load test config
    test_name = config.path_to_name(test_name)
    test_config = config.load_test_config(test_name)
    user_data = test_config['cloud_config']
    test_scripts = test_config['collect_scripts']
    test_output_dir = os.sep.join(
        (args.data_dir, snapshot.platform_name, os_name, test_name))

    # if test is not enabled, skip and return 0 failures
    if not test_config.get('enabled', False):
        LOG.warning('test config %s is not enabled, skipping', test_name)
        return ({}, 0)

    # if testcase requires a feature flag that the image does not support,
    # skip the testcase with a warning
    req_features = test_config.get('required_features', [])
    if any(feature not in snapshot.features for feature in req_features):
        LOG.warning('test config %s requires features not supported by image,'
                    ' skipping.\nrequired features: %s\n'
                    'supported features: %s',
                    test_name, req_features, snapshot.features)
        return ({}, 0)

    # if there are user data overrides required for this test case, apply them
    overrides = snapshot.config.get('user_data_overrides', {})
    if overrides:
        LOG.debug('updating user data for collect with: %s', overrides)
        user_data = util.update_user_data(user_data, overrides)

    # create test instance
    component = PlatformComponent(
        partial(instances.get_instance, snapshot, user_data,
                block=True, start=False, use_desc=test_name))

    LOG.info('collecting test data for test: %s', test_name)
    with component as instance:
        start_call = partial(run_single, 'boot instance', partial(
            instance.start, wait=True, wait_for_cloud_init=True))
        collect_calls = [partial(run_single, 'script {}'.format(script_name),
                                 partial(collect_script, instance,
                                         test_output_dir, script, script_name))
                         for script_name, script in test_scripts.items()]

        res = run_stage('collect for test: {}'.format(test_name),
                        [start_call] + collect_calls)

    return res
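
# Standalone sketch of the deferred-call pattern used above, with simplified
# stand-ins for run_single and run_stage (the real helpers also collect
# per-call results); all names and commands here are illustrative only.
from functools import partial


def run_single_sketch(name, call):
    """Run one deferred call; return 1 on failure, 0 on success."""
    try:
        call()
        return 0
    except Exception:
        return 1


def run_stage_sketch(description, calls):
    """Run every deferred call in a stage and return the failure count."""
    return sum(call() for call in calls)


scripts = {'hostname': 'hostname', 'uptime': 'uptime'}
calls = [partial(run_single_sketch, 'script {}'.format(name),
                 partial(print, 'would run:', cmd))
         for name, cmd in scripts.items()]
print(run_stage_sketch('collect for test: example', calls))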
Example #3
def collect_test_data(args, snapshot, os_name, test_name):
    """
    collect data for test case
    args: cmdline arguments
    snapshot: instantiated snapshot
    os_name: name of operating system under test
    test_name: name or path of test to run
    return_value: tuple of results and fail count
    """
    res = ({}, 1)

    # load test config
    test_name = config.path_to_name(test_name)
    test_config = config.load_test_config(test_name)
    user_data = test_config['cloud_config']
    test_scripts = test_config['collect_scripts']
    test_output_dir = os.sep.join(
        (args.data_dir, snapshot.platform_name, os_name, test_name))
    boot_timeout = test_config.get('boot_timeout')
    if not isinstance(boot_timeout, int):
        boot_timeout = snapshot.config.get('timeout')

    # if test is not enabled, skip and return 0 failures
    if not test_config.get('enabled', False):
        LOG.warning('test config %s is not enabled, skipping', test_name)
        return ({}, 0)

    # create test instance
    component = PlatformComponent(
        partial(instances.get_instance,
                snapshot,
                user_data,
                block=True,
                start=False,
                use_desc=test_name))

    LOG.info('collecting test data for test: %s', test_name)
    with component as instance:
        start_call = partial(
            run_single, 'boot instance',
            partial(instance.start, wait=True, wait_time=boot_timeout))
        collect_calls = [
            partial(
                run_single, 'script {}'.format(script_name),
                partial(collect_script, instance, test_output_dir, script,
                        script_name))
            for script_name, script in test_scripts.items()
        ]

        res = run_stage('collect for test: {}'.format(test_name),
                        [start_call] + collect_calls)

    return res
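
# Standalone sketch of the boot_timeout fallback above (illustrative): prefer
# an integer 'boot_timeout' from the test config, otherwise fall back to the
# platform-level 'timeout' from the snapshot config.
def resolve_boot_timeout(test_config, platform_config):
    timeout = test_config.get('boot_timeout')
    if isinstance(timeout, int):
        return timeout
    return platform_config.get('timeout')


assert resolve_boot_timeout({'boot_timeout': 120}, {'timeout': 600}) == 120
assert resolve_boot_timeout({}, {'timeout': 600}) == 600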
Example #4
def verify_data(base_dir, tests):
    """
    verify test data is correct
    base_dir: base directory for data
    tests: list of test names
    return_value: {<test_name>: {passed: True/False, failures: []}}
    """
    runner = unittest.TextTestRunner(verbosity=util.current_verbosity())
    res = {}
    for test_name in tests:
        LOG.debug('verifying test data for %s', test_name)

        # get cloudconfig for test
        test_conf = config.load_test_config(test_name)
        test_module = config.name_to_module(test_name)
        cloud_conf = test_conf['cloud_config']

        # load script outputs
        data = {}
        test_dir = os.path.join(base_dir, test_name)
        for script_name in os.listdir(test_dir):
            with open(os.path.join(test_dir, script_name), 'r') as fp:
                data[script_name] = fp.read()

        # get test suite and launch tests
        suite = testcases.get_suite(test_module, data, cloud_conf)
        suite_results = runner.run(suite)
        res[test_name] = {
            'passed': suite_results.wasSuccessful(),
            'failures': [{
                'module': type(test_class).__base__.__module__,
                'class': type(test_class).__base__.__name__,
                'function': str(test_class).split()[0],
                'error': trace.splitlines()[-1],
                'traceback': trace,
            } for test_class, trace in suite_results.failures]
        }

        for failure in res[test_name]['failures']:
            LOG.warning('test case: %s failed %s.%s with: %s', test_name,
                        failure['class'], failure['function'],
                        failure['error'])

    return res
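
# Usage sketch, added for illustration: base_dir is the per-platform, per-OS
# directory written by the collect stage; the path and test name below are
# hypothetical.
results = verify_data('/tmp/cloud_test_data/lxd/xenial',
                      ['modules/example_test'])
for name, outcome in results.items():
    print(name, 'passed' if outcome['passed'] else outcome['failures'])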
Example #5
def run(args):
    """
    run full test suite
    """
    failed = 0
    args.data_dir = tempfile.mkdtemp(prefix='cloud_test_data_')
    LOG.debug('using tmpdir %s', args.data_dir)
    try:
        failed += collect.collect(args)
        failed += verify.verify(args)
    except Exception:
        failed += 1
        raise
    finally:
        # TODO: make this configurable via environ or cmdline
        if failed:
            LOG.warning('some tests failed, leaving data in %s', args.data_dir)
        else:
            shutil.rmtree(args.data_dir)
    return failed
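
# Standalone sketch of the keep-data-on-failure pattern above (illustrative):
# the temporary directory is removed only when every stage succeeds, so a
# failing run leaves its artifacts behind for inspection.
import shutil
import tempfile


def run_with_tmpdir(stages):
    failed = 0
    data_dir = tempfile.mkdtemp(prefix='cloud_test_data_')
    try:
        for stage in stages:
            failed += stage(data_dir)
    except Exception:
        failed += 1
        raise
    finally:
        if not failed:
            shutil.rmtree(data_dir)
    return failed


print(run_with_tmpdir([lambda data_dir: 0]))  # prints 0; tmpdir is removed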
Example #6
def verify(args):
    """
    verify test data
    return_value: 0 for success, or number of failed tests
    """
    failed = 0
    res = {}

    # find test data
    tests = util.list_test_data(args.data_dir)

    for platform in tests.keys():
        res[platform] = {}
        for os_name in tests[platform].keys():
            test_name = "platform='{}', os='{}'".format(platform, os_name)
            LOG.info('test: %s verifying test data', test_name)

            # run test
            res[platform][os_name] = verify_data(
                os.sep.join((args.data_dir, platform, os_name)),
                tests[platform][os_name])

            # handle results
            fail_list = [
                k for k, v in res[platform][os_name].items()
                if not v.get('passed')
            ]
            if len(fail_list) == 0:
                LOG.info('test: %s passed all tests', test_name)
            else:
                LOG.warning('test: %s failed %s tests',
                            test_name, len(fail_list))
            failed += len(fail_list)

    # dump results
    LOG.debug('verify results: %s', res)
    if args.result:
        util.merge_results({'verify': res}, args.result)

    return failed
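
# Usage sketch, added for illustration: verify() expects the on-disk layout
# produced by the collect stage, <data_dir>/<platform>/<os_name>/<test_name>/
# <script_name>; the data directory below is hypothetical, and result=None
# simply skips the util.merge_results() call.
import argparse

verify_args = argparse.Namespace(
    data_dir='/tmp/cloud_test_data_example',
    result=None)
print('{} test(s) failed'.format(verify(verify_args)))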