def shutdown(self, wait=True):
    """Shutdown instance by running 'shutdown now' inside it over ssh.

    @param wait: if True, block until the qemu process has exited, or
                 raise util.PlatformError after a 120 second timeout.
    """
    if self.pid:
        # This relies on _execute which uses sudo over ssh.  The ssh
        # connection would get killed before sudo exited, so ignore errors.
        cmd = ['shutdown', 'now']
        try:
            self._execute(cmd)
        except util.InTargetExecuteError:
            pass
        self._ssh_close()

        if wait:
            LOG.debug("Executed shutdown. waiting on pid %s to end",
                      self.pid)
            time_for_shutdown = 120
            give_up_at = time.time() + time_for_shutdown
            # the process is gone once its /proc entry disappears
            pid_file_path = '/proc/%s' % self.pid
            msg = ("pid %s did not exit in %s seconds after shutdown." %
                   (self.pid, time_for_shutdown))
            while True:
                if not os.path.exists(pid_file_path):
                    break
                if time.time() > give_up_at:
                    raise util.PlatformError("shutdown", msg)
                # Fix: poll once per second instead of busy-spinning on
                # os.path.exists for up to two minutes.
                time.sleep(1)
        self.pid = None
def install_rpm(args, image):
    """Install rpm into image.

    @param args: cmdline arguments, must contain --rpm
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    # only redhat-family targets can install an .rpm
    os_family = util.get_os_family(image.properties['os'])
    if os_family != 'redhat':
        raise NotImplementedError('install rpm: {} not supported on os '
                                  'family: {}'.format(args.rpm, os_family))

    # install rpm
    msg = 'install rpm: "{}" into target'.format(args.rpm)
    LOG.debug(msg)
    remote_path = os.path.join('/tmp', os.path.basename(args.rpm))
    image.push_file(args.rpm, remote_path)
    image.execute(['rpm', '-U', remote_path], description=msg)

    # NOTE(review): `rpm -q` queries the installed-package database by
    # package *name*; querying a package *file* normally requires `-p`.
    # Also the queryformat embeds literal double quotes, which would end
    # up in `out`.  Confirm this comparison actually works as intended.
    fmt = ['--queryformat', '"%{VERSION}"']
    (out, _err, _exit) = image.execute(['rpm', '-q'] + fmt + [remote_path])
    expected_version = out.strip()
    found_version = installed_package_version(image, 'cloud-init')
    if expected_version != found_version:
        raise OSError('install rpm version "{}" does not match expected "{}"'
                      .format(found_version, expected_version))

    LOG.debug('successfully installed: %s, version %s', args.rpm,
              found_version)
def snapshot(self):
    """Create snapshot of image, block until done.

    Will return base image_ami if no instance has been booted, otherwise
    will run the clean script, shutdown the instance, create a custom
    AMI, and use that AMI once available.
    """
    # no backing instance was ever booted: the stock AMI is unmodified,
    # so reuse it and do not delete it on destroy
    if not self._img_instance:
        return EC2Snapshot(self.platform, self.properties, self.config,
                           self.features, self.image_ami,
                           delete_on_destroy=False)

    # optionally scrub the booted instance before imaging it
    if self.config.get('boot_clean_script'):
        self._img_instance.run_script(self.config.get('boot_clean_script'))

    # the instance must be stopped before create_image for a clean AMI
    self._img_instance.shutdown(wait=True)

    LOG.debug('creating custom ami from instance %s',
              self._img_instance.instance.instance_id)
    response = self.platform.ec2_client.create_image(
        Name='%s-%s' % (self.platform.tag, self.image_ami),
        InstanceId=self._img_instance.instance.instance_id
    )
    image_ami_edited = response['ImageId']

    # Create image and wait until it is in the 'available' state
    image = self.platform.ec2_resource.Image(image_ami_edited)
    image.wait_until_exists()
    waiter = self.platform.ec2_client.get_waiter('image_available')
    waiter.wait(ImageIds=[image.id])
    image.reload()

    # delete_on_destroy defaults to True, so the custom AMI is cleaned up
    return EC2Snapshot(self.platform, self.properties, self.config,
                       self.features, image_ami_edited)
def collect_image(args, platform, os_name):
    """Acquire an image for one distro and collect its test data.

    @param args: cmdline arguments
    @param platform: instantiated platform
    @param os_name: name of distro to collect for
    @return_value: tuple of results and fail count
    """
    outcome = ({}, 1)
    os_config = config.load_os_config(
        platform.platform_name, os_name, require_enabled=True,
        feature_overrides=args.feature_override)
    LOG.debug('os config: %s', os_config)
    image_component = PlatformComponent(
        partial(platforms.get_image, platform, os_config))
    LOG.info('acquiring image for os: %s', os_name)
    with image_component as image:
        stage_calls = [
            partial(setup_image.setup_image, args, image),
            partial(collect_snapshot, args, image, os_name),
        ]
        outcome = run_stage(
            'set up and collect data for os: {}'.format(os_name),
            stage_calls, continue_after_error=False)
    return outcome
def install_deb(args, image):
    """Install a local deb package into the target image.

    @param args: cmdline arguments, must contain --deb
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    # only debian-family targets can install a .deb
    family = util.get_os_family(image.properties['os'])
    if family != 'debian':
        raise NotImplementedError('install deb: {} not supported on os '
                                  'family: {}'.format(args.deb, family))

    # push the package into the target and install it via apt
    msg = 'install deb: "{}" into target'.format(args.deb)
    LOG.debug(msg)
    target_path = os.path.join('/tmp', os.path.basename(args.deb))
    image.push_file(args.deb, target_path)
    install_cmd = ['apt-get', 'install', '--allow-downgrades',
                   '--assume-yes', target_path]
    image.execute(install_cmd, description=msg)

    # the package's declared version must match what actually installed
    query_cmd = ['dpkg-deb', '-W', "--showformat=${Version}", target_path]
    expected_version = image.execute(query_cmd)[0].strip()
    found_version = installed_package_version(image, 'cloud-init')
    if expected_version != found_version:
        raise OSError('install deb version "{}" does not match expected "{}"'
                      .format(found_version, expected_version))

    LOG.debug('successfully installed: %s, version: %s', args.deb,
              found_version)
def __exit__(self, etype, value, trace):
    """Tear down the wrapped instance unless preservation was requested."""
    if self.instance is None:
        return
    if self.preserve_instance:
        LOG.info('Preserving test instance %s', self.instance.name)
        return
    self.instance.destroy()
def run_single(name, call):
    """Run one part of a stage, recording its outcome and duration.

    @param name: name of part
    @param call: call to make
    @return_value: a tuple of result and fail count
    """
    result = {'name': name, 'time': 0, 'errors': [], 'success': False}
    fail_count = 0
    started = time.time()

    try:
        call()
    except Exception as e:
        fail_count += 1
        result['errors'].append(str(e))
        LOG.error('stage part: %s encountered error: %s', name, str(e))
        trace = traceback.extract_tb(sys.exc_info()[-1])
        LOG.error('traceback:\n%s', ''.join(traceback.format_list(trace)))

    result['time'] = time.time() - started
    result['success'] = fail_count == 0
    return result, fail_count
def normalize_create_args(args):
    """Normalize CREATE arguments.

    @param args: parsed args
    @return_value: updated args, or None if errors occurred
    """
    # test names must look like "<dir>/<name>"
    if len(args.name.split('/')) != 2:
        LOG.error('invalid test name: %s', args.name)
        return None

    # refuse to clobber an existing test unless --force was given
    if os.path.exists(config.name_to_path(args.name)):
        msg = 'test: {} already exists'.format(args.name)
        if not args.force:
            LOG.error(msg)
            return None
        LOG.warning('%s but ignoring due to --force', msg)

    # an explicitly-passed config may not be an empty string
    if isinstance(args.config, str) and len(args.config) == 0:
        LOG.error('test config cannot be empty if specified')
        return None

    # an explicitly-passed description must be 1-70 characters
    if (isinstance(args.description, str) and
            (len(args.description) > 70 or len(args.description) == 0)):
        LOG.error('test description must be between 1 and 70 characters')
        return None

    return args
def _ssh_close(self):
    """Close any open SSH connection, tolerating close failures."""
    client = self._ssh_client
    if not client:
        return
    try:
        client.close()
    except SSHException:
        LOG.warning('Failed to close SSH connection.')
    self._ssh_client = None
def shutdown(self, wait=True):
    """Stop the EC2 instance, optionally blocking until stopped."""
    LOG.debug('stopping instance %s', self.instance.id)
    self.instance.stop()
    if not wait:
        return
    self.instance.wait_until_stopped()
    self.instance.reload()
def bddeb(args):
    """Entry point for build deb.

    @param args: cmdline arguments
    @return_value: fail count
    """
    LOG.info('preparing to build cloud-init deb')
    # run_stage returns (results, fail_count); only the count matters here
    return run_stage('build deb', [partial(setup_build, args)])[1]
def destroy(self):
    """Delete the instance used to create a custom image."""
    backing = self._img_instance
    if backing:
        LOG.debug('terminating backing instance %s',
                  backing.instance.instance_id)
        backing.instance.terminate()
        backing.instance.wait_until_terminated()

    super(EC2Image, self).destroy()
def run_script(args, image):
    """Run a script in the target image.

    @param args: cmdline arguments, must contain --script
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    description = 'run setup image script in target image'
    LOG.debug(description)
    image.run_script(args.script, description=description)
def destroy(self):
    """Terminate the EC2 instance and tear down its SSH connection."""
    if self.instance:
        LOG.debug('destroying instance %s', self.instance.id)
        self.instance.terminate()
        self.instance.wait_until_terminated()

    self._ssh_close()
    super(EC2Instance, self).destroy()
def destroy(self):
    """Deregister the backing AMI and delete its EBS snapshot."""
    if self.delete_on_destroy:
        ami = self.platform.ec2_resource.Image(self.image_ami)
        ebs_snapshot_id = ami.block_device_mappings[0]['Ebs']['SnapshotId']

        LOG.debug('removing custom ami %s', self.image_ami)
        self.platform.ec2_client.deregister_image(ImageId=self.image_ami)

        LOG.debug('removing custom snapshot %s', ebs_snapshot_id)
        self.platform.ec2_client.delete_snapshot(
            SnapshotId=ebs_snapshot_id)
def normalize_bddeb_args(args):
    """Normalize BDDEB arguments.

    @param args: parsed args
    @return_value: updated args, or None if errors encountered
    """
    # the cloud-init source tree must exist and be a directory
    tree = args.cloud_init
    if tree and os.path.isdir(tree):
        return args

    LOG.error('invalid cloud-init tree path')
    return None
def destroy(self):
    """Clean up instance.

    Stops and deletes the container, verifies removal, then removes the
    console log file and the temporary working directory.
    """
    LOG.debug("%s: deleting container.", self)
    # a frozen container cannot be shut down; unfreeze first
    self.unfreeze()
    self.shutdown()
    self.pylxd_container.delete(wait=True)
    self._pylxd_container = None

    # deletion returned, so the container must really be gone
    if self.platform.container_exists(self.name):
        raise OSError('%s: container was not properly removed' % self)
    # remove the on-disk console log, if one was configured and written
    if self._console_log_file and os.path.exists(self._console_log_file):
        os.unlink(self._console_log_file)
    shutil.rmtree(self.tmpd)
    super(LXDInstance, self).destroy()
def normalize_output_deb_args(args): """Normalize OUTPUT_DEB arguments. @param args: parsed args @return_value: updated args, or None if erros occurred """ # make sure to use abspath for deb args.deb = os.path.abspath(args.deb) if not args.deb.endswith('.deb'): LOG.error('output filename does not end in ".deb"') return None return args
def collect_console(instance, base_dir):
    """Write the instance's console log to base_dir/console.log.

    @param instance: instance to get console log for
    @param base_dir: directory to write console log to
    """
    logfile = os.path.join(base_dir, 'console.log')
    LOG.debug('getting console log for %s to %s', instance.name, logfile)
    try:
        console_data = instance.console_log()
    except NotImplementedError as e:
        # args[0] is hacky, but thats all I see to get at the message.
        console_data = b'NotImplementedError:' + e.args[0].encode()
    with open(logfile, "wb") as fp:
        fp.write(console_data)
def collect(args):
    """Entry point for collection.

    @param args: cmdline arguments
    @return_value: fail count
    """
    platform_calls = [partial(collect_platform, args, platform_name)
                      for platform_name in args.platform]
    (res, failed) = run_stage('collect data', platform_calls)
    LOG.debug('collect stages: %s', res)
    if args.result:
        util.merge_results({'collect_stages': res}, args.result)
    return failed
def enable_ppa(args, image):
    """Enable a ppa in the target image.

    @param args: cmdline arguments, must contain --ppa
    @param image: cloud_tests.image instance to operate on
    @return_value: None, may raise errors
    """
    # ppa only supported on ubuntu (maybe debian?)
    if image.properties['os'].lower() != 'ubuntu':
        raise NotImplementedError('enabling a ppa is only available on ubuntu')

    # add the archive, then refresh the package index
    ppa = 'ppa:{}'.format(args.ppa)
    msg = 'enable ppa: "{}" in target'.format(ppa)
    LOG.debug(msg)
    shell_cmd = 'add-apt-repository --yes {} && apt-get update'.format(ppa)
    image.execute(shell_cmd, description=msg)
def console_log_method(self):
    """Determine (and cache) how console logs are fetched for this host."""
    if self._console_log_method is not None:
        return self._console_log_method

    client = which('lxc')
    if not client:
        raise PlatformError("No 'lxc' client.")

    if _has_proper_console_support():
        method = 'show-log'
    elif client.startswith("/snap"):
        method = 'logfile-snap'
    else:
        method = 'logfile-tmp'

    self._console_log_method = method
    LOG.debug("Set console log method to %s", method)
    return method
def verify_data(data_dir, platform, os_name, tests):
    """Verify test data is correct.

    @param data_dir: top level directory for all tests
    @param platform: the platform name this test data was collected on
        (e.g. lxd)
    @param os_name: The operating system under test (xenial, artful, etc.).
    @param tests: list of test names
    @return_value: {<test_name>: {passed: True/False, failures: []}}
    """
    base_dir = os.sep.join((data_dir, platform, os_name))
    runner = unittest2.TextTestRunner(verbosity=util.current_verbosity())
    res = {}
    for test_name in tests:
        LOG.debug('verifying test data for %s', test_name)

        # get cloudconfig for test
        test_conf = config.load_test_config(test_name)
        test_module = config.name_to_module(test_name)
        cloud_conf = test_conf['cloud_config']

        # load script outputs; platform/os_name are made available to the
        # test cases alongside the collected script output
        data = {'platform': platform, 'os_name': os_name}
        test_dir = os.path.join(base_dir, test_name)
        for script_name in os.listdir(test_dir):
            with open(os.path.join(test_dir, script_name), 'rb') as fp:
                data[script_name] = fp.read()

        # get test suite and launch tests
        suite = testcases.get_suite(test_module, data, cloud_conf)
        suite_results = runner.run(suite)
        res[test_name] = {
            'passed': suite_results.wasSuccessful(),
            # each failure records the test's base class location, the
            # failing test method name, and the final traceback line
            'failures': [{'module': type(test_class).__base__.__module__,
                          'class': type(test_class).__base__.__name__,
                          'function': str(test_class).split()[0],
                          'error': trace.splitlines()[-1],
                          'traceback': trace, }
                         for test_class, trace in suite_results.failures]
        }

        for failure in res[test_name]['failures']:
            LOG.warning('test case: %s failed %s.%s with: %s',
                        test_name, failure['class'], failure['function'],
                        failure['error'])

    return res
def verify_data(base_dir, tests):
    """Verify test data is correct.

    @param base_dir: base directory for data
    @param tests: list of test names
    @return_value: {<test_name>: {passed: True/False, failures: []}}
    """
    runner = unittest.TextTestRunner(verbosity=util.current_verbosity())
    res = {}
    for test_name in tests:
        LOG.debug('verifying test data for %s', test_name)

        # get cloudconfig for test
        test_conf = config.load_test_config(test_name)
        test_module = config.name_to_module(test_name)
        cloud_conf = test_conf['cloud_config']

        # load script outputs
        data = {}
        test_dir = os.path.join(base_dir, test_name)
        for script_name in os.listdir(test_dir):
            with open(os.path.join(test_dir, script_name), 'r') as fp:
                data[script_name] = fp.read()

        # get test suite and launch tests
        suite = testcases.get_suite(test_module, data, cloud_conf)
        suite_results = runner.run(suite)
        res[test_name] = {
            'passed': suite_results.wasSuccessful(),
            # record base class location, failing method name, and the
            # final traceback line for each failure
            'failures': [{
                'module': type(test_class).__base__.__module__,
                'class': type(test_class).__base__.__name__,
                'function': str(test_class).split()[0],
                'error': trace.splitlines()[-1],
                'traceback': trace,
            } for test_class, trace in suite_results.failures]
        }

        for failure in res[test_name]['failures']:
            # Fix: LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning('test case: %s failed %s.%s with: %s',
                        test_name, failure['class'], failure['function'],
                        failure['error'])

    return res
def upgrade(args, image):
    """Upgrade or install cloud-init from repo.

    @param args: cmdline arguments
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    family = util.get_os_family(image.properties['os'])
    # per-family shell commands for installing cloud-init from the repo
    commands = {
        'debian': 'apt-get update && apt-get install cloud-init --yes',
        'redhat': 'sleep 10 && yum install cloud-init --assumeyes',
    }
    if family not in commands:
        raise NotImplementedError

    msg = 'upgrading cloud-init'
    LOG.debug(msg)
    image.execute(commands[family], description=msg)
def run_stage(parent_name, calls, continue_after_error=True):
    """Run a stage of collection, keeping track of results and failures.

    @param parent_name: name of stage calls are under
    @param calls: list of function call taking no params. must return a
                  tuple of results and failures. may raise exceptions
    @param continue_after_error: whether or not to proceed to the next
                                 call after catching an exception or
                                 recording a failure
    @return_value: a tuple of results and failures, with result containing
                   results from the function call under 'stages', a list
                   of errors (if any on this level), elapsed time, and
                   the stage name
    """
    stage_result = {
        'name': parent_name,
        'time': 0,
        'errors': [],
        'stages': [],
        'success': False,
    }
    total_failed = 0
    started = time.time()

    for call in calls:
        try:
            (sub_result, sub_failed) = call()
            stage_result['stages'].append(sub_result)
        except Exception as e:
            sub_failed = 1
            stage_result['errors'].append(str(e))
            LOG.error('stage: %s encountered error: %s', parent_name, str(e))
            trace = traceback.extract_tb(sys.exc_info()[-1])
            LOG.error('traceback:\n%s', ''.join(traceback.format_list(trace)))

        total_failed += sub_failed
        if sub_failed and not continue_after_error:
            break

    stage_result['time'] = time.time() - started
    stage_result['success'] = not total_failed

    return (stage_result, total_failed)
def upgrade_full(args, image):
    """Run the system's full upgrade command.

    @param args: cmdline arguments
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    family = util.get_os_family(image.properties['os'])
    if family == 'debian':
        upgrade_cmd = 'apt-get update && apt-get upgrade --yes'
    elif family == 'redhat':
        upgrade_cmd = 'yum upgrade --assumeyes'
    else:
        raise NotImplementedError('upgrade command not configured for distro '
                                  'from family: {}'.format(family))

    msg = 'full system upgrade'
    LOG.debug(msg)
    image.execute(upgrade_cmd, description=msg)
def setup_image(args, image): """Set up image as specified in args. @param args: cmdline arguments @param image: cloud_tests.image instance to operate on @return_value: tuple of results and fail count """ # update the args if necessary for this image overrides = image.setup_overrides LOG.debug('updating args for setup with: %s', overrides) args = util.update_args(args, overrides, preserve_old=True) # mapping of setup cmdline arg name to setup function # represented as a tuple rather than a dict or odict as lookup by name not # needed, and order is important as --script and --upgrade go at the end handlers = ( # arg handler description ('deb', install_deb, 'setup func for --deb, install deb'), ('rpm', install_rpm, 'setup func for --rpm, install rpm'), ('repo', enable_repo, 'setup func for --repo, enable repo'), ('ppa', enable_ppa, 'setup func for --ppa, enable ppa'), ('script', run_script, 'setup func for --script, run script'), ('upgrade', upgrade, 'setup func for --upgrade, upgrade cloud-init'), ('upgrade-full', upgrade_full, 'setup func for --upgrade-full'), ) # determine which setup functions needed calls = [partial(stage.run_single, desc, partial(func, args, image)) for name, func, desc in handlers if getattr(args, name, None)] try: data = yaml.safe_load( image.read_data("/etc/cloud/build.info", decode=True)) info = ' '.join(["%s=%s" % (k, data.get(k)) for k in ("build_name", "serial") if k in data]) except Exception as e: info = "N/A (%s)" % e LOG.info('setting up image %s (info %s)', image, info) res = stage.run_stage( 'set up for {}'.format(image), calls, continue_after_error=False) return res
def collect_snapshot(args, image, os_name):
    """Collect data for snapshot of image.

    @param args: cmdline arguments
    @param image: instantiated image with set up complete
    @return_value tuple of results and fail count
    """
    outcome = ({}, 1)
    snapshot_component = PlatformComponent(
        partial(platforms.get_snapshot, image))

    LOG.debug('creating snapshot for %s', os_name)
    with snapshot_component as snapshot:
        LOG.info('collecting test data for os: %s', os_name)
        test_calls = [
            partial(collect_test_data, args, snapshot, os_name, test_name)
            for test_name in args.test_config]
        outcome = run_stage(
            'collect test data for {}'.format(os_name), test_calls)
    return outcome
def enable_ppa(args, image):
    """Enable a ppa in the target image.

    args: cmdline arguments, must contain --ppa
    image: cloud_tests.image instance to operate on
    return_value: None, may raise errors
    """
    # ppa only supported on ubuntu (maybe debian?)
    if image.properties['os'] != 'ubuntu':
        raise NotImplementedError('enabling a ppa is only available on ubuntu')

    # add ppa with add-apt-repository and update
    ppa = 'ppa:{}'.format(args.ppa)
    LOG.debug('enabling %s', ppa)
    shell_cmd = 'add-apt-repository --yes {} && apt-get update'.format(ppa)
    (out, err, exit_code) = image.execute(['/bin/sh', '-c', shell_cmd])
    if exit_code != 0:
        raise OSError(
            'enable ppa for {} failed\n\tstdout: {}\n\tstderr: {}'.format(
                ppa, out, err))
def list_test_data(data_dir):
    """Find all tests with test data available in data_dir.

    data_dir should contain <platforms>/<os_name>/<testnames>/<data>
    return_value: {<platform>: {<os_name>: [<testname>]}}
    """
    if not os.path.isdir(data_dir):
        raise ValueError("bad data dir")

    res = {}
    for platform in os.listdir(data_dir):
        per_os = {}
        for os_name in os.listdir(os.path.join(data_dir, platform)):
            pattern = os.sep.join((data_dir, platform, os_name, '*/*'))
            # keep only the trailing "<testname>/<data>" portion of each path
            per_os[os_name] = [os.path.sep.join(f.split(os.path.sep)[-2:])
                               for f in glob.glob(pattern)]
        res[platform] = per_os

    LOG.debug('found test data: %s\n', res)
    return res
def run(args):
    """Run the full test suite (collect then verify).

    @param args: parsed cmdline arguments
    @return_value: fail count
    """
    failed = 0
    args.data_dir = tempfile.mkdtemp(prefix='cloud_test_data_')
    LOG.debug('using tmpdir %s', args.data_dir)
    try:
        failed += collect.collect(args)
        failed += verify.verify(args)
    except Exception:
        failed += 1
        raise
    finally:
        # TODO: make this configurable via environ or cmdline
        if failed:
            # Fix: LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning('some tests failed, leaving data in %s',
                        args.data_dir)
        else:
            shutil.rmtree(args.data_dir)
    return failed
def collect_snapshot(args, image, os_name):
    """Collect data for snapshot of image.

    args: cmdline arguments
    image: instantiated image with set up complete
    return_value tuple of results and fail count
    """
    outcome = ({}, 1)
    component = PlatformComponent(partial(snapshots.get_snapshot, image))

    LOG.debug('creating snapshot for %s', os_name)
    with component as snapshot:
        LOG.info('collecting test data for os: %s', os_name)
        calls = [partial(collect_test_data, args, snapshot, os_name, name)
                 for name in args.test_config]
        outcome = run_stage('collect test data for {}'.format(os_name), calls)

    return outcome
def main(): """ entry point for cloud test suite """ # configure parser parser = argparse.ArgumentParser(prog='cloud_tests') subparsers = parser.add_subparsers(dest="subcmd") subparsers.required = True def add_subparser(name, description, arg_sets): """ add arguments to subparser """ subparser = subparsers.add_parser(name, help=description) for (_args, _kwargs) in (a for arg_set in arg_sets for a in arg_set): subparser.add_argument(*_args, **_kwargs) # configure subparsers for (name, (description, arg_sets)) in args.SUBCMDS.items(): add_subparser(name, description, [args.ARG_SETS[arg_set] for arg_set in arg_sets]) # parse arguments parsed = parser.parse_args() # process arguments configure_log(parsed) (_, arg_sets) = args.SUBCMDS[parsed.subcmd] for normalizer in [args.NORMALIZERS[arg_set] for arg_set in arg_sets]: parsed = normalizer(parsed) if not parsed: return -1 # run handler LOG.debug('running with args: %s\n', parsed) return { 'collect': collect.collect, 'create': manage.create, 'run': run, 'verify': verify.verify, }[parsed.subcmd](parsed)
def setup_image(args, image):
    """Set up image as specified in args.

    @param args: cmdline arguments
    @param image: cloud_tests.image instance to operate on
    @return_value: tuple of results and fail count
    """
    # update the args if necessary for this image
    overrides = image.setup_overrides
    LOG.debug('updating args for setup with: %s', overrides)
    args = util.update_args(args, overrides, preserve_old=True)

    # mapping of setup cmdline arg name to setup function
    # represented as a tuple rather than a dict or odict as lookup by name not
    # needed, and order is important as --script and --upgrade go at the end
    handlers = (
        # arg   handler     description
        ('deb', install_deb, 'setup func for --deb, install deb'),
        ('rpm', install_rpm, 'setup func for --rpm, install rpm'),
        ('repo', enable_repo, 'setup func for --repo, enable repo'),
        ('ppa', enable_ppa, 'setup func for --ppa, enable ppa'),
        ('script', run_script, 'setup func for --script, run script'),
        ('upgrade', upgrade, 'setup func for --upgrade, upgrade cloud-init'),
        ('upgrade-full', upgrade_full, 'setup func for --upgrade-full'),
    )

    # determine which setup functions needed
    calls = [partial(stage.run_single, desc, partial(func, args, image))
             for name, func, desc in handlers if getattr(args, name, None)]

    # best-effort read of build metadata from the image, for logging only.
    # Fix: use yaml.safe_load - yaml.load without an explicit Loader is
    # deprecated and can construct arbitrary objects; the other setup_image
    # variant in this codebase already uses safe_load.
    try:
        data = yaml.safe_load(
            image.read_data("/etc/cloud/build.info", decode=True))
        info = ' '.join(["%s=%s" % (k, data.get(k))
                         for k in ("build_name", "serial") if k in data])
    except Exception as e:
        info = "N/A (%s)" % e

    LOG.info('setting up %s (%s)', image, info)
    res = stage.run_stage(
        'set up for {}'.format(image), calls, continue_after_error=False)
    return res
def execute(self, command, stdin=None, env=None, rcs=None, description=None):
    """Execute command in instance, recording output, error and exit code.

    Assumes functional networking and execution as root with the
    target filesystem being available at /.

    @param command: the command to execute as root inside the image
        if command is a string, then it will be executed as:
        ['sh', '-c', command]
    @param stdin: bytes content for standard in
    @param env: environment variables
    @param rcs: return codes.
                None (default): non-zero exit code will raise exception.
                False: any is allowed (No exception raised).
                list of int: any rc not in the list will raise exception.
    @param description: purpose of command
    @return_value: tuple containing stdout data, stderr data, exit code
    """
    if isinstance(command, str):
        command = ['sh', '-c', command]

    # None means only rc 0 is acceptable
    allowed_rcs = (0, ) if rcs is None else rcs

    if description:
        LOG.debug('executing "%s"', description)
    else:
        LOG.debug("executing command: %s", shell_quote(command))

    out, err, rc = self._execute(command=command, stdin=stdin, env=env)

    # False means accept anything.
    if allowed_rcs is False or rc in allowed_rcs:
        return out, err, rc
    raise InTargetExecuteError(out, err, rc, command, description)
def collect_script(instance, base_dir, script, script_name):
    """Collect script data.

    @param instance: instance to run script on
    @param base_dir: base directory for output data
    @param script: script contents
    @param script_name: name of script to run
    @return_value: None, may raise errors
    """
    LOG.debug('running collect script: %s', script_name)
    out, err, _exit = instance.run_script(
        script.encode(), rcs=False,
        description='collect: {}'.format(script_name))
    if err:
        LOG.debug("collect script %s had stderr: %s", script_name, err)
    if not isinstance(out, bytes):
        raise util.PlatformError(
            "Collection of '%s' returned type %s, expected bytes: %s" %
            (script_name, type(out), out))
    c_util.write_file(os.path.join(base_dir, script_name), out)
def enable_repo(args, image):
    """Enable a repository in the target image.

    @param args: cmdline arguments, must contain --repo
    @param image: cloud_tests.image instance to operate on
    @return_value: None, may raise errors
    """
    # find enable repo command for the distro
    os_family = util.get_os_family(image.properties['os'])
    if os_family == 'debian':
        cmd = ('echo "{}" >> "/etc/apt/sources.list" '.format(args.repo) +
               '&& apt-get update')
    elif os_family == 'redhat':
        # Fix: the sibling helpers in this module (install_rpm, upgrade,
        # upgrade_full) all match util.get_os_family() against 'redhat';
        # the previous 'centos' comparison here never matched, making the
        # yum branch unreachable.
        cmd = 'yum-config-manager --add-repo="{}"'.format(args.repo)
    else:
        raise NotImplementedError('enable repo command not configured for '
                                  'distro from family: {}'.format(os_family))

    msg = 'enable repo: "{}" in target'.format(args.repo)
    LOG.debug(msg)
    image.execute(cmd, description=msg)
def _get_blob_client(self):
    """
    Use VM details to retrieve container and blob name.
    Then create a blob service client using a SAS token to retrieve the
    console log.

    :return: blob service, container name, blob name
    """
    LOG.debug('creating blob service for console log')
    storage = self.platform.storage_client.storage_accounts.get_properties(
        self.platform.resource_group.name, self.platform.storage.name)

    # first account key is used to sign the SAS token below
    keys = self.platform.storage_client.storage_accounts.list_keys(
        self.platform.resource_group.name, self.platform.storage.name
    ).keys[0].value

    # instanceView expansion is required to see boot diagnostics
    virtual_machine = self.platform.compute_client.virtual_machines.get(
        self.platform.resource_group.name, self.instance.name,
        expand='instanceView')

    blob_uri = virtual_machine.instance_view.boot_diagnostics.\
        serial_console_log_blob_uri

    # the URI path ends with <container>/<blob>
    container, blob = urlparse(blob_uri).path.split('/')[-2:]

    # key-authenticated client, used only to mint the SAS token
    blob_client = BlockBlobService(
        account_name=storage.name,
        account_key=keys)

    # read-only token, valid for one hour
    sas = blob_client.generate_blob_shared_access_signature(
        container_name=container, blob_name=blob, protocol='https',
        expiry=datetime.utcnow() + timedelta(hours=1),
        permission=BlobPermissions.READ)

    # rebuild the client with the scoped SAS token for the actual read
    blob_client = BlockBlobService(account_name=storage.name, sas_token=sas)

    return blob_client, container, blob
def get_image(self, img_conf):
    """Get image using specified image configuration.

    @param img_conf: configuration for image
    @return_value: cloud_tests.images instance
    """
    release = img_conf['release']
    stream_filters = [
        'arch=%s' % 'amd64',
        'endpoint=https://management.core.windows.net/',
        'region=%s' % self.azure_location_to_simplestreams_region(),
        'release=%s' % release
    ]

    LOG.debug('finding image using streams')
    found = self._query_streams(img_conf, stream_filters)

    try:
        image_id = found['id']
    except KeyError:
        raise RuntimeError('no images found for %s' % release)

    LOG.debug('found image: %s', image_id)
    # urn-style ids look like "<publisher>__<sku>..."; keep the second part
    if image_id.find('__') > 0:
        image_id = image_id.split('__')[1]
        LOG.debug('image_id shortened to %s', image_id)

    return AzureCloudImage(self, img_conf, image_id)
def normalize_output_args(args):
    """Normalize OUTPUT arguments.

    @param args: parsed args
    @return_value: updated args, or None if errors occurred
    """
    # canonicalize the data dir, creating it if needed; empty -> None
    if args.data_dir:
        args.data_dir = os.path.abspath(args.data_dir)
        if not os.path.exists(args.data_dir):
            os.mkdir(args.data_dir)
    else:
        args.data_dir = None

    # ensure clean output dir if collect
    # ensure data exists if verify
    if args.subcmd == 'collect':
        if not util.is_clean_writable_dir(args.data_dir):
            LOG.error('data_dir must be empty/new and must be writable')
            return None

    return args
def normalize_setup_args(args):
    """Normalize SETUP arguments.

    @param args: parsed args
    @return_value: updated_args, or None if errors occurred
    """
    # both --deb and --rpm must point at real files when given
    for package in (args.deb, args.rpm):
        if package is None:
            continue
        if not os.path.exists(package):
            LOG.error('cannot find package: %s', package)
            return None

    # enabling a repo or ppa implies upgrading cloud-init from it
    if args.repo or args.ppa:
        args.upgrade = True

    # strip a leading 'ppa:' prefix if the user supplied one
    prefix = 'ppa:'
    if args.ppa and args.ppa.startswith(prefix):
        args.ppa = args.ppa[len(prefix):]

    return args
def collect_platform(args, platform_name):
    """Collect data for platform.

    @param args: cmdline arguments
    @param platform_name: platform to collect for
    @return_value: tuple of results and fail count
    """
    outcome = ({}, 1)
    platform_config = config.load_platform_config(platform_name,
                                                  require_enabled=True)
    component = PlatformComponent(
        partial(platforms.get_platform, platform_name, platform_config))

    LOG.info('setting up platform: %s', platform_name)
    with component as platform:
        image_calls = [partial(collect_image, args, platform, os_name)
                       for os_name in args.os_name]
        outcome = run_stage(
            'collect for platform: {}'.format(platform_name), image_calls)

    return outcome
def collect_script(instance, base_dir, script, script_name):
    """Collect script data.

    @param instance: instance to run script on
    @param base_dir: base directory for output data
    @param script: script contents
    @param script_name: name of script to run
    @return_value: None, may raise errors
    """
    LOG.debug('running collect script: %s', script_name)
    (out, err, exit_code) = instance.run_script(
        script.encode(), rcs=False,
        description='collect: {}'.format(script_name))
    if err:
        # Fix: the exit code and stderr arguments were swapped relative to
        # the format string's placeholders.
        LOG.debug("collect script %s exited '%s' and had stderr: %s",
                  script_name, exit_code, err)
    if not isinstance(out, bytes):
        raise util.PlatformError(
            "Collection of '%s' returned type %s, expected bytes: %s" %
            (script_name, type(out), out))
    c_util.write_file(os.path.join(base_dir, script_name), out)
def normalize_collect_args(args): """Normalize COLLECT arguments. @param args: parsed args @return_value: updated args, or None if errors occurred """ # platform should default to all supported if len(args.platform) == 0: args.platform = config.ENABLED_PLATFORMS args.platform = util.sorted_unique(args.platform) # os name should default to all enabled # if os name is provided ensure that all provided are supported if len(args.os_name) == 0: args.os_name = config.ENABLED_DISTROS else: supported = config.ENABLED_DISTROS invalid = [ os_name for os_name in args.os_name if os_name not in supported ] if len(invalid) != 0: LOG.error('invalid os name(s): %s', invalid) return None args.os_name = util.sorted_unique(args.os_name) # test configs should default to all enabled # if test configs are provided, ensure that all provided are valid if len(args.test_config) == 0: args.test_config = config.list_test_configs() else: valid = [] invalid = [] for name in args.test_config: if os.path.exists(name): valid.append(name) elif os.path.exists(config.name_to_path(name)): valid.append(config.name_to_path(name)) else: invalid.append(name) if len(invalid) != 0: LOG.error('invalid test config(s): %s', invalid) return None else: args.test_config = valid args.test_config = util.sorted_unique(args.test_config) # parse feature flag overrides and ensure all are valid if args.feature_override: overrides = args.feature_override args.feature_override = util.parse_conf_list( overrides, boolean=True, valid=config.list_feature_flags()) if not args.feature_override: LOG.error('invalid feature flag override(s): %s', overrides) return None else: args.feature_override = {} return args
def _create_nic(self):
    """Create network interface controller"""
    LOG.debug('creating nic')
    nic_name = '%s-nic' % self.resource_group.name
    # Azure requires the public IP to be referenced by full resource id.
    public_ip_id = (
        "/subscriptions/%s"
        "/resourceGroups/%s/providers/Microsoft.Network"
        "/publicIPAddresses/%s" % (
            self.subscription_id, self.resource_group.name,
            self.public_ip.name))
    ip_configuration = {
        'name': 'ipconfig',
        'subnet': {'id': self.subnet.id},
        'publicIpAddress': {'id': public_ip_id},
    }
    nic_params = {
        'location': self.location,
        'ip_configurations': [ip_configuration],
    }
    poller = self.network_client.network_interfaces.create_or_update(
        self.resource_group.name, nic_name, nic_params)
    # create_or_update is async; block until the NIC actually exists
    return poller.result()
def _ssh_connect(self):
    """Connect via SSH.

    Attempt to SSH to the client on the specific IP and port. If it
    fails in some manner, then retry 2 more times for a total of 3
    attempts; sleeping a few seconds between attempts.
    """
    if self._ssh_client:
        return self._ssh_client
    if not self.ssh_ip or not self.ssh_port:
        raise ValueError("Cannot ssh_connect, ssh_ip=%s ssh_port=%s" %
                         (self.ssh_ip, self.ssh_port))

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file)

    # three attempts total, sleeping between failures
    for _attempt in range(3):
        try:
            client.connect(username=self.ssh_username,
                           hostname=self.ssh_ip, port=self.ssh_port,
                           pkey=key)
            self._ssh_client = client
            return client
        except (ConnectionRefusedError, AuthenticationException,
                BadHostKeyException, ConnectionResetError, SSHException,
                OSError):
            LOG.debug('Retrying ssh connection on connect failure')
            time.sleep(3)

    ssh_cmd = 'Failed ssh connection to %s@%s:%s after 3 retries' % (
        self.ssh_username, self.ssh_ip, self.ssh_port
    )
    raise util.InTargetExecuteError(b'', b'', 1, ssh_cmd, 'ssh')
def get_image(self, img_conf):
    """Get image using specified image configuration.

    Hard coded for 'amd64' based images.

    @param img_conf: configuration for image
    @return_value: cloud_tests.images instance
    """
    # map streams root_store names from the config's root-store values
    root_store_by_conf = {'ebs': 'ssd', 'instance-store': 'instance'}
    requested_store = img_conf['root-store']
    if requested_store not in root_store_by_conf:
        raise RuntimeError('Unknown root-store type: %s'
                           % (requested_store))
    root_store = root_store_by_conf[requested_store]

    filters = [
        'arch=amd64',
        'endpoint=https://ec2.%s.amazonaws.com' % self.ec2_region,
        'region=%s' % self.ec2_region,
        'release=%s' % img_conf['release'],
        'root_store=%s' % root_store,
        'virt=hvm',
    ]

    LOG.debug('finding image using streams')
    stream_entry = self._query_streams(img_conf, filters)

    try:
        image_ami = stream_entry['id']
    except KeyError as e:
        raise RuntimeError('No images found for %s!'
                           % img_conf['release']) from e

    LOG.debug('found image: %s', image_ami)
    return EC2Image(self, img_conf, image_ami)
def _wait_for_system(self, wait_for_cloud_init):
    """Wait until system has fully booted and cloud-init has finished.

    The maximum wait time is taken from self.config['boot_timeout'].

    @param wait_for_cloud_init: if True, also require the configured
        cloud_init_ready_script to succeed before returning
    @return_value: None, raises util.PlatformError if boot_timeout exceeded
    """
    def clean_test(test):
        """Clean formatting for system ready test testcase."""
        return ' '.join(line for line in test.strip().splitlines()
                        if not line.lstrip().startswith('#'))

    boot_timeout = self.config['boot_timeout']
    tests = [self.config['system_ready_script']]
    if wait_for_cloud_init:
        tests.append(self.config['cloud_init_ready_script'])

    # build a single in-target shell loop that polls all readiness tests
    # once per second until they pass (exit 0) or boot_timeout elapses
    # (exit 1)
    formatted_tests = ' && '.join(clean_test(t) for t in tests)
    cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && '
           'exit 0; sleep 1; done; exit 1').format(time=boot_timeout,
                                                   test=formatted_tests)

    end_time = time.time() + boot_timeout
    while True:
        try:
            return_code = self.execute(
                cmd, rcs=(0, 1), description='wait for instance start')[-1]
            if return_code == 0:
                break
        except util.InTargetExecuteError:
            # ssh may not be up yet; keep retrying until the deadline
            LOG.warning("failed to connect via SSH")
        if time.time() < end_time:
            time.sleep(3)
        else:
            raise util.PlatformError(
                'ssh', 'after %ss instance is not '
                'reachable' % boot_timeout)
def snapshot(self):
    """Create snapshot of image, block until done.

    Will return base image_ami if no instance has been booted, otherwise
    will run the clean script, shutdown the instance, create a custom
    AMI, and use that AMI once available.
    """
    # no instance booted: hand back the unmodified base AMI
    if not self._img_instance:
        return EC2Snapshot(self.platform, self.properties, self.config,
                           self.features, self.image_ami,
                           delete_on_destroy=False)

    clean_script = self.config.get('boot_clean_script')
    if clean_script:
        self._img_instance.run_script(clean_script)

    self._img_instance.shutdown(wait=True)

    instance_id = self._img_instance.instance.instance_id
    LOG.debug('creating custom ami from instance %s', instance_id)
    response = self.platform.ec2_client.create_image(
        Name='%s-%s' % (self.platform.tag, self.image_ami),
        InstanceId=instance_id)
    custom_ami = response['ImageId']

    # Create image and wait until it is in the 'available' state
    image = self.platform.ec2_resource.Image(custom_ami)
    image.wait_until_exists()
    self.platform.ec2_client.get_waiter('image_available').wait(
        ImageIds=[image.id])
    image.reload()

    return EC2Snapshot(self.platform, self.properties, self.config,
                       self.features, custom_ami)
def install_deb(args, image):
    """Install deb into image.

    @param args: cmdline arguments, must contain --deb
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    # ensure system is compatible with package format
    os_family = util.get_os_family(image.properties['os'])
    if os_family != 'debian':
        raise NotImplementedError('install deb: {} not supported on os '
                                  'family: {}'.format(args.deb, os_family))

    # install deb
    LOG.debug('installing deb: %s into target', args.deb)
    remote_path = os.path.join('/tmp', os.path.basename(args.deb))
    image.push_file(args.deb, remote_path)
    (out, err, exit_code) = image.execute(['dpkg', '-i', remote_path])
    if exit_code != 0:
        raise OSError(
            'failed install deb: {}\n\tstdout: {}\n\tstderr: {}'.format(
                args.deb, out, err))

    # check installed deb version matches package
    fmt = ['-W', "--showformat='${Version}'"]
    (out, _err, _exit) = image.execute(['dpkg-deb'] + fmt + [remote_path])
    expected_version = out.strip()
    (out, _err, _exit) = image.execute(['dpkg-query'] + fmt + ['cloud-init'])
    found_version = out.strip()
    if expected_version != found_version:
        raise OSError(
            'install deb version "{}" does not match expected "{}"'.format(
                found_version, expected_version))

    LOG.debug('successfully installed: %s, version: %s', args.deb,
              found_version)
def enable_repo(args, image):
    """Enable a repository in the target image.

    @param args: cmdline arguments, must contain --repo
    @param image: cloud_tests.image instance to operate on
    @return_value: None, may raise errors
    """
    # find enable repo command for the distro
    os_family = util.get_os_family(image.properties['os'])
    if os_family == 'debian':
        cmd = ('echo "{}" >> "/etc/apt/sources.list" '.format(args.repo) +
               '&& apt-get update')
    elif os_family == 'centos':
        cmd = 'yum-config-manager --add-repo="{}"'.format(args.repo)
    else:
        raise NotImplementedError('enable repo command not configured for '
                                  'distro from family: {}'.format(os_family))

    LOG.debug('enabling repo: "%s"', args.repo)
    (out, err, exit) = image.execute(['/bin/sh', '-c', cmd])
    if exit != 0:
        raise OSError(
            'enable repo {} failed\n\tstdout: {}\n\tstderr: {}'.format(
                args.repo, out, err))
def shutdown(self, wait=True):
    """Finds console log then stopping/deallocates VM.

    @param wait: if True, block until the deallocate operation completes

    Polls up to 5 times (15s apart) for the console-log blob before
    stopping, since deallocating the VM can make the log unavailable.
    Failure to find the log is non-fatal: the VM is deallocated anyway.
    """
    LOG.debug('waiting on console log before stopping')
    attempts, exists = 5, False
    # NOTE: loop also stops early if get_blob_to_bytes returns a truthy
    # blob; the decrement happens inside the try so every call, success
    # or failure, consumes one attempt.
    while not exists and attempts:
        try:
            attempts -= 1
            exists = self.blob_client.get_blob_to_bytes(
                self.container, self.blob)
            LOG.debug('found console log')
        except Exception as e:
            # broad catch: any storage error is treated as "not yet there"
            if attempts:
                LOG.debug('Unable to find console log, '
                          '%s attempts remaining', attempts)
                sleep(15)
            else:
                # out of attempts: give up on the log, proceed to stop
                LOG.warning('Could not find console log: %s', e)
    LOG.debug('stopping instance %s', self.image_id)
    # deallocate (rather than just stop) releases the compute resources
    vm_deallocate = \
        self.platform.compute_client.virtual_machines.deallocate(
            self.platform.resource_group.name, self.image_id)
    if wait:
        vm_deallocate.wait()
def _has_proper_console_support():
    """Check whether the local LXD setup can provide console logs.

    Requires the server 'console' api extension, a 3.x+ driver, and a
    client whose `lxc console` supports --log.
    """
    server_info = load_yaml(subp(['lxc', 'info'])[0])
    reason = None
    if 'console' not in server_info.get('api_extensions', []):
        reason = "LXD server does not support console api extension"
    else:
        dver = server_info.get('environment', {}).get('driver_version', "")
        if dver.startswith(("2.", "1.")):
            reason = "LXD Driver version not 3.x+ (%s)" % dver
        else:
            try:
                help_text = subp(['lxc', 'console', '--help'],
                                 decode=False)[0]
                if b'console' not in help_text or b'log' not in help_text:
                    reason = "no '--log' in lxc console --help"
            except ProcessExecutionError:
                reason = "no 'console' command in lxc client"

    if reason:
        LOG.debug("no console-support: %s", reason)
        return False
    LOG.debug("console-support looks good")
    return True
def collect_platform(args, platform_name):
    """Collect data for platform.

    @param args: cmdline arguments
    @param platform_name: platform to collect for
    @return_value: tuple of results and fail count
    """
    res = ({}, 1)
    platform_config = config.load_platform_config(platform_name)
    if not platform_config.get('enabled'):
        raise ValueError('Platform {} not enabled'.format(platform_name))

    component = PlatformComponent(
        partial(platforms.get_platform, platform_name, platform_config))

    LOG.info('setting up platform: %s', platform_name)
    with component as platform:
        # one collection stage per requested distro
        image_stages = [partial(collect_image, args, platform, os_name)
                        for os_name in args.os_name]
        res = run_stage(
            'collect for platform: {}'.format(platform_name), image_stages)

    return res
def start(self, wait=True, wait_for_cloud_init=False):
    """Start instance on EC2 with the platform's VPC.

    @param wait: if True, block until the instance is running (and,
        optionally, cloud-init is done) before returning
    @param wait_for_cloud_init: also wait for cloud-init to finish
    """
    if self.instance:
        if self.instance.state['Name'] == 'running':
            return
        LOG.debug('starting instance %s', self.instance.id)
        self.instance.start()
    else:
        LOG.debug('launching instance')

        args = {
            'ImageId': self.image_ami,
            'InstanceType': self.platform.instance_type,
            'KeyName': self.platform.key_name,
            'MaxCount': 1,
            'MinCount': 1,
            'SecurityGroupIds': [self.platform.security_group.id],
            'SubnetId': self.platform.subnet.id,
            'TagSpecifications': [{
                'ResourceType': 'instance',
                'Tags': [{
                    'Key': 'Name', 'Value': self.platform.tag
                }]
            }],
        }

        if self.user_data:
            args['UserData'] = self.user_data

        try:
            instances = self.platform.ec2_resource.create_instances(**args)
        except botocore.exceptions.ClientError as error:
            error_msg = error.response['Error']['Message']
            # chain the cause so the original AWS error is preserved
            # (matches the `from e` convention used by get_image)
            raise util.PlatformError('start', error_msg) from error

        self.instance = instances[0]

    LOG.debug('instance id: %s', self.instance.id)
    if wait:
        self.instance.wait_until_running()
        self.instance.reload()
        self.ssh_ip = self.instance.public_ip_address
        self._wait_for_system(wait_for_cloud_init)