Example #1
    def _ssh_close(self):
        if self._ssh_client:
            try:
                self._ssh_client.close()
            except SSHException:
                LOG.warning('Failed to close SSH connection.')
            self._ssh_client = None
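The method above is lifted from a larger class; a self-contained sketch of the same idempotent-close pattern, assuming paramiko is installed (it exports SSHException at the package top level; the mixin name here is illustrative, not from the project):

import logging

from paramiko import SSHException

LOG = logging.getLogger(__name__)


class SSHConnectionMixin(object):
    """Illustrative holder for an SSH client opened elsewhere."""

    _ssh_client = None

    def _ssh_close(self):
        if self._ssh_client:
            try:
                self._ssh_client.close()
            except SSHException:
                LOG.warning('Failed to close SSH connection.')
            # clear the reference either way so repeated closes are no-ops
            self._ssh_client = None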
Example #2
    def shutdown(self, wait=True):
        """Finds console log then stopping/deallocates VM"""
        LOG.debug('waiting on console log before stopping')
        attempts, exists = 5, False
        while not exists and attempts:
            try:
                attempts -= 1
                exists = self.blob_client.get_blob_to_bytes(
                    self.container, self.blob)
                LOG.debug('found console log')
            except Exception as e:
                if attempts:
                    LOG.debug(
                        'Unable to find console log, '
                        '%s attempts remaining', attempts)
                    sleep(15)
                else:
                    LOG.warning('Could not find console log: %s', e)

        LOG.debug('stopping instance %s', self.image_id)
        vm_deallocate = \
            self.platform.compute_client.virtual_machines.deallocate(
                self.platform.resource_group.name, self.image_id)
        if wait:
            vm_deallocate.wait()
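The bounded poll-and-sleep loop above is a general shape; a minimal standalone sketch with illustrative names, using the same attempt count and delay as the code above:

import logging
import time

LOG = logging.getLogger(__name__)


def poll_until(func, attempts=5, delay=15):
    """Call func() until it returns without raising or attempts run out."""
    for remaining in reversed(range(attempts)):
        try:
            return func()
        except Exception as e:
            if remaining:
                LOG.debug('retrying, %s attempts remaining', remaining)
                time.sleep(delay)
            else:
                LOG.warning('giving up after %s attempts: %s', attempts, e)
    return None

# e.g. poll_until(lambda: blob_client.get_blob_to_bytes(container, blob))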
Example #3
def collect_test_data(args, snapshot, os_name, test_name):
    """Collect data for test case.

    @param args: cmdline arguments
    @param snapshot: instantiated snapshot
    @param os_name: name of operating system under test
    @param test_name: name or path of test to run
    @return_value: tuple of results and fail count
    """
    res = ({}, 1)

    # load test config
    test_name = config.path_to_name(test_name)
    test_config = config.load_test_config(test_name)
    user_data = test_config['cloud_config']
    test_scripts = test_config['collect_scripts']
    test_output_dir = os.sep.join(
        (args.data_dir, snapshot.platform_name, os_name, test_name))

    # if test is not enabled, skip and return 0 failures
    if not test_config.get('enabled', False):
        LOG.warning('test config %s is not enabled, skipping', test_name)
        return ({}, 0)

    # if testcase requires a feature flag that the image does not support,
    # skip the testcase with a warning
    req_features = test_config.get('required_features', [])
    if any(feature not in snapshot.features for feature in req_features):
        LOG.warning('test config %s requires features not supported by image, '
                    'skipping.\nrequired features: %s\nsupported features: %s',
                    test_name, req_features, snapshot.features)
        return ({}, 0)

    # if there are user data overrides required for this test case, apply them
    overrides = snapshot.config.get('user_data_overrides', {})
    if overrides:
        LOG.debug('updating user data for collect with: %s', overrides)
        user_data = util.update_user_data(user_data, overrides)

    # create test instance
    component = PlatformComponent(
        partial(platforms.get_instance, snapshot, user_data,
                block=True, start=False, use_desc=test_name))

    LOG.info('collecting test data for test: %s', test_name)
    with component as instance:
        start_call = partial(run_single, 'boot instance', partial(
            instance.start, wait=True, wait_for_cloud_init=True))
        collect_calls = [partial(run_single, 'script {}'.format(script_name),
                                 partial(collect_script, instance,
                                         test_output_dir, script, script_name))
                         for script_name, script in test_scripts.items()]

        console_log = partial(
            run_single, 'collect console',
            partial(collect_console, instance, test_output_dir))

        res = run_stage('collect for test: {}'.format(test_name),
                        [start_call] + collect_calls + [console_log])

    return res
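run_single and run_stage above are project helpers that execute pre-built callables; the functools.partial composition can be seen in isolation with simplified stand-ins (every name below is a stand-in, not the project's implementation):

from functools import partial


def run_single(name, call):
    """Simplified stand-in: run one named call, return its fail count."""
    print('running:', name)
    return call()


def run_stage(name, calls):
    """Simplified stand-in: run calls in order, summing fail counts."""
    print('stage:', name)
    return sum(call() for call in calls)


def collect_script(instance, out_dir, script, script_name):
    """Simplified stand-in for the real collector."""
    print('would run %r on %s into %s' % (script, instance, out_dir))
    return 0


test_scripts = {'hostname': 'hostname\n'}
collect_calls = [partial(run_single, 'script {}'.format(script_name),
                         partial(collect_script, 'instance-1', '/tmp/out',
                                 script, script_name))
                 for script_name, script in test_scripts.items()]
print(run_stage('collect for test: demo', collect_calls))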
Example #4
def normalize_create_args(args):
    """Normalize CREATE arguments.

    @param args: parsed args
    @return_value: updated args, or None if errors occurred
    """
    # ensure valid name for new test
    if len(args.name.split('/')) != 2:
        LOG.error('invalid test name: %s', args.name)
        return None
    if os.path.exists(config.name_to_path(args.name)):
        msg = 'test: {} already exists'.format(args.name)
        if args.force:
            LOG.warning('%s but ignoring due to --force', msg)
        else:
            LOG.error(msg)
            return None

    # ensure test config valid if specified
    if isinstance(args.config, str) and len(args.config) == 0:
        LOG.error('test config cannot be empty if specified')
        return None

    # ensure description valid if specified
    if (isinstance(args.description, str) and
            (len(args.description) > 70 or len(args.description) == 0)):
        LOG.error('test description must be between 1 and 70 characters')
        return None

    return args
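The checks above collapse into a standalone predicate; a sketch with the same name and description rules (the path-exists/--force check needs the config module and is omitted, and error logging is dropped):

def create_args_valid(name, config=None, description=None):
    """Return True iff the arguments satisfy the checks above."""
    if len(name.split('/')) != 2:       # name must look like 'dir/test'
        return False
    if isinstance(config, str) and len(config) == 0:
        return False
    if isinstance(description, str) and not 0 < len(description) <= 70:
        return False
    return True


assert create_args_valid('modules/apt_configure', description='configure apt')
assert not create_args_valid('no_slash_name')
assert not create_args_valid('modules/x', description='')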
Example #5
def normalize_create_args(args):
    """Normalize CREATE arguments.

    @param args: parsed args
    @return_value: updated args, or None if errors occurred
    """
    # ensure valid name for new test
    if len(args.name.split('/')) != 2:
        LOG.error('invalid test name: %s', args.name)
        return None
    if os.path.exists(config.name_to_path(args.name)):
        msg = 'test: {} already exists'.format(args.name)
        if args.force:
            LOG.warning('%s but ignoring due to --force', msg)
        else:
            LOG.error(msg)
            return None

    # ensure test config valid if specified
    if isinstance(args.config, str) and len(args.config) == 0:
        LOG.error('test config cannot be empty if specified')
        return None

    # ensure description valid if specified
    if (isinstance(args.description, str)
            and (len(args.description) > 70 or len(args.description) == 0)):
        LOG.error('test description must be between 1 and 70 characters')
        return None

    return args
Example #6
def collect_test_data(args, snapshot, os_name, test_name):
    """
    collect data for test case
    args: cmdline arguments
    snapshot: instantiated snapshot
    test_name: name or path of test to run
    return_value: tuple of results and fail count
    """
    res = ({}, 1)

    # load test config
    test_name = config.path_to_name(test_name)
    test_config = config.load_test_config(test_name)
    user_data = test_config['cloud_config']
    test_scripts = test_config['collect_scripts']
    test_output_dir = os.sep.join(
        (args.data_dir, snapshot.platform_name, os_name, test_name))
    boot_timeout = test_config.get('boot_timeout')
    if not isinstance(boot_timeout, int):
        boot_timeout = snapshot.config.get('timeout')

    # if test is not enabled, skip and return 0 failures
    if not test_config.get('enabled', False):
        LOG.warning('test config %s is not enabled, skipping', test_name)
        return ({}, 0)

    # create test instance
    component = PlatformComponent(
        partial(instances.get_instance,
                snapshot,
                user_data,
                block=True,
                start=False,
                use_desc=test_name))

    LOG.info('collecting test data for test: %s', test_name)
    with component as instance:
        start_call = partial(
            run_single, 'boot instance',
            partial(instance.start, wait=True, wait_time=boot_timeout))
        collect_calls = [
            partial(
                run_single, 'script {}'.format(script_name),
                partial(collect_script, instance, test_output_dir, script,
                        script_name))
            for script_name, script in test_scripts.items()
        ]

        res = run_stage('collect for test: {}'.format(test_name),
                        [start_call] + collect_calls)

    return res
Example #7
def verify_data(data_dir, platform, os_name, tests):
    """Verify test data is correct.

    @param data_dir: top level directory for all tests
    @param platform: The platform name for this test data (e.g. lxd)
    @param os_name: The operating system under test (xenial, artful, etc.).
    @param tests: list of test names
    @return_value: {<test_name>: {passed: True/False, failures: []}}
    """
    base_dir = os.sep.join((data_dir, platform, os_name))
    runner = unittest2.TextTestRunner(verbosity=util.current_verbosity())
    res = {}
    for test_name in tests:
        LOG.debug('verifying test data for %s', test_name)

        # get cloudconfig for test
        test_conf = config.load_test_config(test_name)
        test_module = config.name_to_module(test_name)
        cloud_conf = test_conf['cloud_config']

        # load script outputs
        data = {'platform': platform, 'os_name': os_name}
        test_dir = os.path.join(base_dir, test_name)
        for script_name in os.listdir(test_dir):
            with open(os.path.join(test_dir, script_name), 'rb') as fp:
                data[script_name] = fp.read()

        # get test suite and launch tests
        suite = testcases.get_suite(test_module, data, cloud_conf)
        suite_results = runner.run(suite)
        res[test_name] = {
            'passed': suite_results.wasSuccessful(),
            'failures': [{
                'module': type(test_class).__base__.__module__,
                'class': type(test_class).__base__.__name__,
                'function': str(test_class).split()[0],
                'error': trace.splitlines()[-1],
                'traceback': trace,
            } for test_class, trace in suite_results.failures]
        }

        for failure in res[test_name]['failures']:
            LOG.warning('test case: %s failed %s.%s with: %s', test_name,
                        failure['class'], failure['function'],
                        failure['error'])

    return res
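The failures comprehension above maps unittest's (test, traceback-string) pairs into plain dicts; the same shape with the stdlib runner, trimmed to the function/error/traceback fields (unittest2 backports this API, so the structure carries over):

import unittest


class Demo(unittest.TestCase):
    def test_fails(self):
        self.assertEqual(1, 2)


suite = unittest.defaultTestLoader.loadTestsFromTestCase(Demo)
suite_results = unittest.TextTestRunner(verbosity=0).run(suite)
summary = {
    'passed': suite_results.wasSuccessful(),
    'failures': [{'function': str(test_class).split()[0],
                  'error': trace.splitlines()[-1],
                  'traceback': trace}
                 for test_class, trace in suite_results.failures]
}
print(summary['passed'], summary['failures'][0]['error'])
# False AssertionError: 1 != 2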
Example #8
def verify_data(base_dir, tests):
    """
    verify test data is correct,
    base_dir: base directory for data
    test_config: dict of all test config, from util.load_test_config()
    tests: list of test names
    return_value: {<test_name>: {passed: True/False, failures: []}}
    """
    runner = unittest.TextTestRunner(verbosity=util.current_verbosity())
    res = {}
    for test_name in tests:
        LOG.debug('verifying test data for %s', test_name)

        # get cloudconfig for test
        test_conf = config.load_test_config(test_name)
        test_module = config.name_to_module(test_name)
        cloud_conf = test_conf['cloud_config']

        # load script outputs
        data = {}
        test_dir = os.path.join(base_dir, test_name)
        for script_name in os.listdir(test_dir):
            with open(os.path.join(test_dir, script_name), 'r') as fp:
                data[script_name] = fp.read()

        # get test suite and launch tests
        suite = testcases.get_suite(test_module, data, cloud_conf)
        suite_results = runner.run(suite)
        res[test_name] = {
            'passed': suite_results.wasSuccessful(),
            'failures': [{
                'module': type(test_class).__base__.__module__,
                'class': type(test_class).__base__.__name__,
                'function': str(test_class).split()[0],
                'error': trace.splitlines()[-1],
                'traceback': trace,
            } for test_class, trace in suite_results.failures]
        }

        for failure in res[test_name]['failures']:
            LOG.warning('test case: %s failed %s.%s with: %s', test_name,
                        failure['class'], failure['function'],
                        failure['error'])

    return res
Example #9
def verify_data(data_dir, platform, os_name, tests):
    """Verify test data is correct.

    @param data_dir: top level directory for all tests
    @param platform: The platform name for this test data (e.g. lxd)
    @param os_name: The operating system under test (xenial, artful, etc.).
    @param tests: list of test names
    @return_value: {<test_name>: {passed: True/False, failures: []}}
    """
    base_dir = os.sep.join((data_dir, platform, os_name))
    runner = unittest2.TextTestRunner(verbosity=util.current_verbosity())
    res = {}
    for test_name in tests:
        LOG.debug('verifying test data for %s', test_name)

        # get cloudconfig for test
        test_conf = config.load_test_config(test_name)
        test_module = config.name_to_module(test_name)
        cloud_conf = test_conf['cloud_config']

        # load script outputs
        data = {'platform': platform, 'os_name': os_name}
        test_dir = os.path.join(base_dir, test_name)
        for script_name in os.listdir(test_dir):
            with open(os.path.join(test_dir, script_name), 'rb') as fp:
                data[script_name] = fp.read()

        # get test suite and launch tests
        suite = testcases.get_suite(test_module, data, cloud_conf)
        suite_results = runner.run(suite)
        res[test_name] = {
            'passed': suite_results.wasSuccessful(),
            'failures': [{'module': type(test_class).__base__.__module__,
                          'class': type(test_class).__base__.__name__,
                          'function': str(test_class).split()[0],
                          'error': trace.splitlines()[-1],
                          'traceback': trace, }
                         for test_class, trace in suite_results.failures]
        }

        for failure in res[test_name]['failures']:
            LOG.warning('test case: %s failed %s.%s with: %s',
                        test_name, failure['class'], failure['function'],
                        failure['error'])

    return res
Example #10
def run(args):
    """
    run full test suite
    """
    failed = 0
    args.data_dir = tempfile.mkdtemp(prefix='cloud_test_data_')
    LOG.debug('using tmpdir %s', args.data_dir)
    try:
        failed += collect.collect(args)
        failed += verify.verify(args)
    except Exception:
        failed += 1
        raise
    finally:
        # TODO: make this configurable via environ or cmdline
        if failed:
            LOG.warning('some tests failed, leaving data in %s', args.data_dir)
        else:
            shutil.rmtree(args.data_dir)
    return failed
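The try/except/finally arrangement above is a reusable keep-artifacts-on-failure pattern: the except bumps the counter before re-raising, so the finally block still sees the failure. A standalone sketch with a stand-in test callable:

import logging
import shutil
import tempfile

LOG = logging.getLogger(__name__)


def run_with_tmpdir(run_tests):
    """Run run_tests(data_dir); keep the tmpdir only if something failed."""
    failed = 0
    data_dir = tempfile.mkdtemp(prefix='cloud_test_data_')
    try:
        failed += run_tests(data_dir)
    except Exception:
        failed += 1          # the finally block still sees this bump
        raise
    finally:
        if failed:
            LOG.warning('some tests failed, leaving data in %s', data_dir)
        else:
            shutil.rmtree(data_dir)
    return failed


print(run_with_tmpdir(lambda data_dir: 0))   # prints 0, tmpdir removed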
Example #11
    def shutdown(self, wait=True, retry=1):
        """Shutdown instance."""
        if self.pylxd_container.status == 'Stopped':
            return

        try:
            LOG.debug("%s: shutting down (wait=%s)", self, wait)
            self.pylxd_container.stop(wait=wait)
        except (pylxd_exc.LXDAPIException, pylxd_exc.NotFound) as e:
            # An exception happens here sometimes (LP: #1783198)
            # LOG it, and try again.
            LOG.warning(("%s: shutdown(retry=%d) caught %s in shutdown "
                         "(response=%s): %s"), self, retry,
                        e.__class__.__name__, e.response, e)
            if isinstance(e, pylxd_exc.NotFound):
                LOG.debug("container_exists(%s) == %s", self.name,
                          self.platform.container_exists(self.name))
            if retry == 0:
                raise e
            return self.shutdown(wait=wait, retry=retry - 1)
Example #12
def verify(args):
    """Verify test data.

    @param args: cmdline arguments; args.data_dir holds the test data
    @return_value: 0 for success, or number of failed tests
    """
    failed = 0
    res = {}

    # find test data
    tests = util.list_test_data(args.data_dir)

    for platform in tests.keys():
        res[platform] = {}
        for os_name in tests[platform].keys():
            test_name = "platform='{}', os='{}'".format(platform, os_name)
            LOG.info('test: %s verifying test data', test_name)

            # run test
            res[platform][os_name] = verify_data(
                os.sep.join((args.data_dir, platform, os_name)),
                tests[platform][os_name])

            # handle results
            fail_list = [
                k for k, v in res[platform][os_name].items()
                if not v.get('passed')
            ]
            if len(fail_list) == 0:
                LOG.info('test: %s passed all tests', test_name)
            else:
                LOG.warning('test: %s failed %s tests', test_name,
                            len(fail_list))
            failed += len(fail_list)

    # dump results
    LOG.debug('verify results: %s', res)
    if args.result:
        util.merge_results({'verify': res}, args.result)

    return failed
Example #13
    def shutdown(self, wait=True, retry=1):
        """Shutdown instance."""
        if self.pylxd_container.status == 'Stopped':
            return

        try:
            LOG.debug("%s: shutting down (wait=%s)", self, wait)
            self.pylxd_container.stop(wait=wait)
        except (pylxd_exc.LXDAPIException, pylxd_exc.NotFound) as e:
            # An exception happens here sometimes (LP: #1783198)
            # LOG it, and try again.
            LOG.warning(
                ("%s: shutdown(retry=%d) caught %s in shutdown "
                 "(response=%s): %s"),
                self, retry, e.__class__.__name__, e.response, e)
            if isinstance(e, pylxd_exc.NotFound):
                LOG.debug("container_exists(%s) == %s",
                          self.name, self.platform.container_exists(self.name))
            if retry == 0:
                raise e
            return self.shutdown(wait=wait, retry=retry - 1)
Example #14
def verify(args):
    """Verify test data.

    @param args: cmdline arguments; args.data_dir holds the test data
    @return_value: 0 for success, or number of failed tests
    """
    failed = 0
    res = {}

    # find test data
    tests = util.list_test_data(args.data_dir)

    for platform in tests.keys():
        res[platform] = {}
        for os_name in tests[platform].keys():
            test_name = "platform='{}', os='{}'".format(platform, os_name)
            LOG.info('test: %s verifying test data', test_name)

            # run test
            res[platform][os_name] = verify_data(
                args.data_dir, platform, os_name,
                tests[platform][os_name])

            # handle results
            fail_list = [k for k, v in res[platform][os_name].items()
                         if not v.get('passed')]
            if len(fail_list) == 0:
                LOG.info('test: %s passed all tests', test_name)
            else:
                LOG.warning('test: %s failed %s tests', test_name,
                            len(fail_list))
            failed += len(fail_list)

    # dump results
    LOG.debug('\n---- Verify summarized results:\n%s', format_results(res))
    if args.result:
        util.merge_results({'verify': res}, args.result)

    return failed
Example #15
    def _wait_for_system(self, wait_for_cloud_init):
        """Wait until system has fully booted and cloud-init has finished.

        @param wait_for_cloud_init: whether to also wait for cloud-init
        @return_value: None; may raise util.PlatformError if the configured
                       boot_timeout is exceeded
        """
        def clean_test(test):
            """Clean formatting for system ready test testcase."""
            return ' '.join(l for l in test.strip().splitlines()
                            if not l.lstrip().startswith('#'))

        boot_timeout = self.config['boot_timeout']
        tests = [self.config['system_ready_script']]
        if wait_for_cloud_init:
            tests.append(self.config['cloud_init_ready_script'])

        formatted_tests = ' && '.join(clean_test(t) for t in tests)
        cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && '
               'exit 0; sleep 1; done; exit 1').format(time=boot_timeout,
                                                       test=formatted_tests)

        end_time = time.time() + boot_timeout
        while True:
            try:
                return_code = self.execute(
                    cmd, rcs=(0, 1), description='wait for instance start'
                )[-1]
                if return_code == 0:
                    break
            except util.InTargetExecuteError:
                LOG.warning("failed to connect via SSH")

            if time.time() < end_time:
                time.sleep(3)
            else:
                raise util.PlatformError('ssh', 'after %ss instance is not '
                                         'reachable' % boot_timeout)
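The cmd template above expands to a plain POSIX sh poll loop: it retries the readiness test roughly once per second, exits 0 on the first success, and exits 1 once the counter reaches boot_timeout. For concrete inputs (the readiness test here is illustrative, not the project's config):

boot_timeout = 120
formatted_tests = 'cloud-init status --wait'   # illustrative readiness test
cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && '
       'exit 0; sleep 1; done; exit 1').format(time=boot_timeout,
                                               test=formatted_tests)
print(cmd)
# i=0; while [ $i -lt 120 ] && i=$(($i+1)); do cloud-init status --wait &&
# exit 0; sleep 1; done; exit 1          (printed as a single line)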
Example #16
    def _wait_for_system(self, wait_for_cloud_init):
        """Wait until system has fully booted and cloud-init has finished.

        @param wait_for_cloud_init: whether to also wait for cloud-init
        @return_value: None; may raise util.PlatformError if the configured
                       boot_timeout is exceeded
        """
        def clean_test(test):
            """Clean formatting for system ready test testcase."""
            return ' '.join(line for line in test.strip().splitlines()
                            if not line.lstrip().startswith('#'))

        boot_timeout = self.config['boot_timeout']
        tests = [self.config['system_ready_script']]
        if wait_for_cloud_init:
            tests.append(self.config['cloud_init_ready_script'])

        formatted_tests = ' && '.join(clean_test(t) for t in tests)
        cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && '
               'exit 0; sleep 1; done; exit 1').format(time=boot_timeout,
                                                       test=formatted_tests)

        end_time = time.time() + boot_timeout
        while True:
            try:
                return_code = self.execute(
                    cmd, rcs=(0, 1), description='wait for instance start')[-1]
                if return_code == 0:
                    break
            except util.InTargetExecuteError:
                LOG.warning("failed to connect via SSH")

            if time.time() < end_time:
                time.sleep(3)
            else:
                raise util.PlatformError(
                    'ssh', 'after %ss instance is not '
                    'reachable' % boot_timeout)
Example #17
def collect_test_data(args, snapshot, os_name, test_name):
    """Collect data for test case.

    @param args: cmdline arguments
    @param snapshot: instantiated snapshot
    @param os_name: name of operating system under test
    @param test_name: name or path of test to run
    @return_value: tuple of results and fail count
    """
    res = ({}, 1)

    # load test config
    test_name_in = test_name
    test_name = config.path_to_name(test_name)
    test_config = config.load_test_config(test_name)
    user_data = test_config['cloud_config']
    test_scripts = test_config['collect_scripts']
    test_output_dir = os.sep.join(
        (args.data_dir, snapshot.platform_name, os_name, test_name))

    # if test is not enabled, skip and return 0 failures
    if not test_config.get('enabled', False):
        LOG.warning('test config %s is not enabled, skipping', test_name)
        return ({}, 0)

    test_class = get_test_class(
        config.name_to_module(test_name_in),
        test_data={'platform': snapshot.platform_name, 'os_name': os_name},
        test_conf=test_config['cloud_config'])
    try:
        test_class.maybeSkipTest()
    except base.SkipTest as s:
        LOG.warning('skipping test config %s: %s', test_name, s)
        return ({}, 0)

    # if testcase requires a feature flag that the image does not support,
    # skip the testcase with a warning
    req_features = test_config.get('required_features', [])
    if any(feature not in snapshot.features for feature in req_features):
        LOG.warning('test config %s requires features not supported by image, '
                    'skipping.\nrequired features: %s\nsupported features: %s',
                    test_name, req_features, snapshot.features)
        return ({}, 0)

    # if there are user data overrides required for this test case, apply them
    overrides = snapshot.config.get('user_data_overrides', {})
    if overrides:
        LOG.debug('updating user data for collect with: %s', overrides)
        user_data = util.update_user_data(user_data, overrides)

    # create test instance
    component = PlatformComponent(
        partial(platforms.get_instance, snapshot, user_data,
                block=True, start=False, use_desc=test_name),
        preserve_instance=args.preserve_instance)

    LOG.info('collecting test data for test: %s', test_name)
    with component as instance:
        start_call = partial(run_single, 'boot instance', partial(
            instance.start, wait=True, wait_for_cloud_init=True))
        collect_calls = [partial(run_single, 'script {}'.format(script_name),
                                 partial(collect_script, instance,
                                         test_output_dir, script, script_name))
                         for script_name, script in test_scripts.items()]

        res = run_stage('collect for test: {}'.format(test_name),
                        [start_call] + collect_calls)

        instance.shutdown()
        collect_console(instance, test_output_dir)

    return res