def collect_image(args, platform, os_name):
    """Collect data for image.

    @param args: cmdline arguments
    @param platform: instantiated platform
    @param os_name: name of distro to collect for
    @return_value: tuple of results and fail count
    """
    res = ({}, 1)

    os_config = config.load_os_config(
        platform.platform_name, os_name, require_enabled=True,
        feature_overrides=args.feature_override)
    LOG.debug('os config: %s', os_config)
    component = PlatformComponent(
        partial(platforms.get_image, platform, os_config))

    LOG.info('acquiring image for os: %s', os_name)
    with component as image:
        res = run_stage('set up and collect data for os: {}'.format(os_name),
                        [partial(setup_image.setup_image, args, image)] +
                        [partial(collect_snapshot, args, image, os_name)],
                        continue_after_error=False)

    return res

def install_deb(args, image):
    """Install deb into image.

    @param args: cmdline arguments, must contain --deb
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    # ensure system is compatible with package format
    os_family = util.get_os_family(image.properties['os'])
    if os_family != 'debian':
        raise NotImplementedError('install deb: {} not supported on os '
                                  'family: {}'.format(args.deb, os_family))

    # install deb
    msg = 'install deb: "{}" into target'.format(args.deb)
    LOG.debug(msg)
    remote_path = os.path.join('/tmp', os.path.basename(args.deb))
    image.push_file(args.deb, remote_path)
    image.execute(
        ['apt-get', 'install', '--allow-downgrades', '--assume-yes',
         remote_path], description=msg)

    # check installed deb version matches package
    fmt = ['-W', "--showformat=${Version}"]
    out = image.execute(['dpkg-deb'] + fmt + [remote_path])[0]
    expected_version = out.strip()
    found_version = installed_package_version(image, 'cloud-init')
    if expected_version != found_version:
        raise OSError('install deb version "{}" does not match expected "{}"'
                      .format(found_version, expected_version))

    LOG.debug('successfully installed: %s, version: %s', args.deb,
              found_version)

def upgrade(args, image):
    """Run the system's upgrade command.

    @param args: cmdline arguments
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    # determine appropriate upgrade command for os_family
    # TODO: maybe use cloudinit.distros for this?
    os_family = util.get_os_family(image.properties['os'])
    if os_family == 'debian':
        cmd = 'apt-get update && apt-get upgrade --yes'
    elif os_family == 'redhat':
        cmd = 'yum upgrade --assumeyes'
    else:
        raise NotImplementedError('upgrade command not configured for distro '
                                  'from family: {}'.format(os_family))

    # upgrade system
    LOG.debug('upgrading system')
    (out, err, exit) = image.execute(['/bin/sh', '-c', cmd])
    if exit != 0:
        raise OSError(
            'failed to upgrade system\n\tstdout: {}\n\tstderr: {}'.format(
                out, err))

def install_rpm(args, image):
    """Install rpm into image.

    @param args: cmdline arguments, must contain --rpm
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    # ensure system is compatible with package format
    os_family = util.get_os_family(image.properties['os'])
    if os_family not in ['redhat', 'sles']:
        raise NotImplementedError('install rpm: {} not supported on os '
                                  'family: {}'.format(args.rpm, os_family))

    # install rpm
    LOG.debug('installing rpm: %s into target', args.rpm)
    remote_path = os.path.join('/tmp', os.path.basename(args.rpm))
    image.push_file(args.rpm, remote_path)
    (out, err, exit) = image.execute(['rpm', '-U', remote_path])
    if exit != 0:
        raise OSError(
            'failed to install rpm: {}\n\tstdout: {}\n\tstderr: {}'.format(
                args.rpm, out, err))

    fmt = ['--queryformat', '"%{VERSION}"']
    (out, err, exit) = image.execute(['rpm', '-q'] + fmt + [remote_path])
    expected_version = out.strip()
    (out, err, exit) = image.execute(['rpm', '-q'] + fmt + ['cloud-init'])
    found_version = out.strip()
    if expected_version != found_version:
        raise OSError(
            'install rpm version "{}" does not match expected "{}"'.format(
                found_version, expected_version))

    LOG.debug('successfully installed: %s, version %s', args.rpm,
              found_version)

def destroy(self):
    """Clean up instance."""
    LOG.debug("%s: deleting container.", self)
    self.unfreeze()
    self.shutdown()
    retries = [1] * 5
    for attempt, wait in enumerate(retries):
        try:
            self.pylxd_container.delete(wait=True)
            break
        except Exception:
            if attempt + 1 >= len(retries):
                raise
            LOG.debug('Failed to delete container %s (%s/%s) retrying...',
                      self, attempt + 1, len(retries))
            time.sleep(wait)

    self._pylxd_container = None

    if self.platform.container_exists(self.name):
        raise OSError('%s: container was not properly removed' % self)
    if self._console_log_file and os.path.exists(self._console_log_file):
        os.unlink(self._console_log_file)
    shutil.rmtree(self.tmpd)
    super(LXDInstance, self).destroy()

def install_rpm(args, image):
    """Install rpm into image.

    @param args: cmdline arguments, must contain --rpm
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    os_family = util.get_os_family(image.properties['os'])
    if os_family != 'redhat':
        raise NotImplementedError('install rpm: {} not supported on os '
                                  'family: {}'.format(args.rpm, os_family))

    # install rpm
    msg = 'install rpm: "{}" into target'.format(args.rpm)
    LOG.debug(msg)
    remote_path = os.path.join('/tmp', os.path.basename(args.rpm))
    image.push_file(args.rpm, remote_path)
    image.execute(['rpm', '-U', remote_path], description=msg)

    fmt = ['--queryformat', '"%{VERSION}"']
    (out, err, exit) = image.execute(['rpm', '-q'] + fmt + [remote_path])
    expected_version = out.strip()
    found_version = installed_package_version(image, 'cloud-init')
    if expected_version != found_version:
        raise OSError('install rpm version "{}" does not match expected "{}"'
                      .format(found_version, expected_version))

    LOG.debug('successfully installed: %s, version %s', args.rpm,
              found_version)

def install_deb(args, image):
    """Install deb into image.

    @param args: cmdline arguments, must contain --deb
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    # ensure system is compatible with package format
    os_family = util.get_os_family(image.properties['os'])
    if os_family != 'debian':
        raise NotImplementedError('install deb: {} not supported on os '
                                  'family: {}'.format(args.deb, os_family))

    # install deb
    msg = 'install deb: "{}" into target'.format(args.deb)
    LOG.debug(msg)
    remote_path = os.path.join('/tmp', os.path.basename(args.deb))
    image.push_file(args.deb, remote_path)
    image.execute(
        ['apt-get', 'install', '--allow-downgrades', '--assume-yes',
         remote_path], description=msg)

    # check installed deb version matches package
    fmt = ['-W', "--showformat=${Version}"]
    (out, err, exit) = image.execute(['dpkg-deb'] + fmt + [remote_path])
    expected_version = out.strip()
    found_version = installed_package_version(image, 'cloud-init')
    if expected_version != found_version:
        raise OSError('install deb version "{}" does not match expected "{}"'
                      .format(found_version, expected_version))

    LOG.debug('successfully installed: %s, version: %s', args.deb,
              found_version)

def install_rpm(args, image):
    """Install rpm into image.

    @param args: cmdline arguments, must contain --rpm
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    os_family = util.get_os_family(image.properties['os'])
    if os_family != 'redhat':
        raise NotImplementedError('install rpm: {} not supported on os '
                                  'family: {}'.format(args.rpm, os_family))

    # install rpm
    msg = 'install rpm: "{}" into target'.format(args.rpm)
    LOG.debug(msg)
    remote_path = os.path.join('/tmp', os.path.basename(args.rpm))
    image.push_file(args.rpm, remote_path)
    image.execute(['rpm', '-U', remote_path], description=msg)

    fmt = ['--queryformat', '"%{VERSION}"']
    (out, _err, _exit) = image.execute(['rpm', '-q'] + fmt + [remote_path])
    expected_version = out.strip()
    found_version = installed_package_version(image, 'cloud-init')
    if expected_version != found_version:
        raise OSError('install rpm version "{}" does not match expected "{}"'
                      .format(found_version, expected_version))

    LOG.debug('successfully installed: %s, version %s', args.rpm,
              found_version)

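# The install_deb/install_rpm variants above all call
# installed_package_version(), which is not shown in this section. A minimal
# sketch of what it might look like, assuming it dispatches on os_family the
# same way the installers do; the real helper's signature and error handling
# may differ:
def installed_package_version(image, package):
    """Get installed version of package (sketch; assumed implementation).

    @param image: cloud_tests.images instance to operate on
    @param package: name of installed package to query
    @return_value: installed version string
    """
    os_family = util.get_os_family(image.properties['os'])
    if os_family == 'debian':
        cmd = ['dpkg-query', '-W', "--showformat=${Version}", package]
    elif os_family == 'redhat':
        cmd = ['rpm', '-q', '--queryformat', "%{VERSION}", package]
    else:
        raise NotImplementedError('version query not supported on os '
                                  'family: {}'.format(os_family))
    (out, _err, _exit) = image.execute(
        cmd, description='query version for: {}'.format(package))
    return out.strip()
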
def _has_proper_console_support():
    """Check whether the lxc client and LXD server support console logs."""
    stdout, _ = subp(['lxc', 'info'])
    info = load_yaml(stdout)
    reason = None
    if 'console' not in info.get('api_extensions', []):
        reason = "LXD server does not support console api extension"
    else:
        dver = info.get('environment', {}).get('driver_version', "")
        if dver.startswith("2.") or dver.startswith("1."):
            reason = "LXD Driver version not 3.x+ (%s)" % dver
        else:
            try:
                stdout, stderr = subp(['lxc', 'console', '--help'],
                                      decode=False)
                if not (b'console' in stdout and b'log' in stdout):
                    reason = "no '--log' in lxc console --help"
            except ProcessExecutionError:
                reason = "no 'console' command in lxc client"

    if reason:
        LOG.debug("no console-support: %s", reason)
        return False
    else:
        LOG.debug("console-support looks good")
        return True

def collect_test_data(args, snapshot, os_name, test_name):
    """Collect data for test case.

    @param args: cmdline arguments
    @param snapshot: instantiated snapshot
    @param test_name: name or path of test to run
    @return_value: tuple of results and fail count
    """
    res = ({}, 1)

    # load test config
    test_name = config.path_to_name(test_name)
    test_config = config.load_test_config(test_name)
    user_data = test_config['cloud_config']
    test_scripts = test_config['collect_scripts']
    test_output_dir = os.sep.join(
        (args.data_dir, snapshot.platform_name, os_name, test_name))

    # if test is not enabled, skip and return 0 failures
    if not test_config.get('enabled', False):
        LOG.warning('test config %s is not enabled, skipping', test_name)
        return ({}, 0)

    # if testcase requires a feature flag that the image does not support,
    # skip the testcase with a warning
    req_features = test_config.get('required_features', [])
    if any(feature not in snapshot.features for feature in req_features):
        LOG.warning('test config %s requires features not supported by '
                    'image, skipping.\nrequired features: %s\nsupported '
                    'features: %s', test_name, req_features,
                    snapshot.features)
        return ({}, 0)

    # if there are user data overrides required for this test case, apply them
    overrides = snapshot.config.get('user_data_overrides', {})
    if overrides:
        LOG.debug('updating user data for collect with: %s', overrides)
        user_data = util.update_user_data(user_data, overrides)

    # create test instance
    component = PlatformComponent(
        partial(platforms.get_instance, snapshot, user_data,
                block=True, start=False, use_desc=test_name))

    LOG.info('collecting test data for test: %s', test_name)
    with component as instance:
        start_call = partial(run_single, 'boot instance', partial(
            instance.start, wait=True, wait_for_cloud_init=True))
        collect_calls = [partial(run_single, 'script {}'.format(script_name),
                                 partial(collect_script, instance,
                                         test_output_dir, script,
                                         script_name))
                         for script_name, script in test_scripts.items()]
        console_log = partial(
            run_single, 'collect console',
            partial(collect_console, instance, test_output_dir))

        res = run_stage('collect for test: {}'.format(test_name),
                        [start_call] + collect_calls + [console_log])

    return res

def snapshot(self):
    """Create snapshot of image, block until done.

    Will return base image_ami if no instance has been booted, otherwise
    will run the clean script, shutdown the instance, create a custom
    AMI, and use that AMI once available.
    """
    if not self._img_instance:
        return EC2Snapshot(self.platform, self.properties, self.config,
                           self.features, self.image_ami,
                           delete_on_destroy=False)

    if self.config.get('boot_clean_script'):
        self._img_instance.run_script(self.config.get('boot_clean_script'))

    self._img_instance.shutdown(wait=True)

    LOG.debug('creating custom ami from instance %s',
              self._img_instance.instance.instance_id)
    response = self.platform.ec2_client.create_image(
        Name='%s-%s' % (self.platform.tag, self.image_ami),
        InstanceId=self._img_instance.instance.instance_id
    )
    image_ami_edited = response['ImageId']

    # Create image and wait until it is in the 'available' state
    image = self.platform.ec2_resource.Image(image_ami_edited)
    image.wait_until_exists()
    waiter = self.platform.ec2_client.get_waiter('image_available')
    waiter.wait(ImageIds=[image.id])
    image.reload()

    return EC2Snapshot(self.platform, self.properties, self.config,
                       self.features, image_ami_edited)

def shutdown(self, wait=True):
    """Shutdown instance."""
    if self.pid:
        # This relies on _execute which uses sudo over ssh. The ssh
        # connection would get killed before sudo exited, so ignore errors.
        cmd = ['shutdown', 'now']
        try:
            self._execute(cmd)
        except util.InTargetExecuteError:
            pass
        self._ssh_close()

        if wait:
            LOG.debug("Executed shutdown. waiting on pid %s to end",
                      self.pid)
            time_for_shutdown = 120
            give_up_at = time.time() + time_for_shutdown
            pid_file_path = '/proc/%s' % self.pid
            msg = ("pid %s did not exit in %s seconds after shutdown." %
                   (self.pid, time_for_shutdown))
            while True:
                if not os.path.exists(pid_file_path):
                    break
                if time.time() > give_up_at:
                    raise util.PlatformError("shutdown", msg)
        self.pid = None

def setup_image(args, image):
    """Set up image as specified in args.

    @param args: cmdline arguments
    @param image: cloud_tests.image instance to operate on
    @return_value: tuple of results and fail count
    """
    # update the args if necessary for this image
    overrides = image.setup_overrides
    LOG.debug('updating args for setup with: %s', overrides)
    args = util.update_args(args, overrides, preserve_old=True)

    # mapping of setup cmdline arg name to setup function
    # represented as a tuple rather than a dict or odict as lookup by name not
    # needed, and order is important as --script and --upgrade go at the end
    handlers = (
        # arg   handler      description
        ('deb', install_deb, 'setup func for --deb, install deb'),
        ('rpm', install_rpm, 'setup func for --rpm, install rpm'),
        ('repo', enable_repo, 'setup func for --repo, enable repo'),
        ('ppa', enable_ppa, 'setup func for --ppa, enable ppa'),
        ('script', run_script, 'setup func for --script, run script'),
        ('upgrade', upgrade, 'setup func for --upgrade, upgrade cloud-init'),
        ('upgrade-full', upgrade_full, 'setup func for --upgrade-full'),
    )

    # determine which setup functions needed
    calls = [partial(stage.run_single, desc, partial(func, args, image))
             for name, func, desc in handlers if getattr(args, name, None)]

    LOG.info('setting up %s', image)
    res = stage.run_stage(
        'set up for {}'.format(image), calls, continue_after_error=False)

    return res

def _create_nic(self):
    """Create network interface controller."""
    LOG.debug('creating nic')
    nic_name = '%s-nic' % self.resource_group.name
    nic_params = {
        'location': self.location,
        'ip_configurations': [{
            'name': 'ipconfig',
            'subnet': {
                'id': self.subnet.id
            },
            'publicIpAddress': {
                'id': "/subscriptions/%s"
                      "/resourceGroups/%s/providers/Microsoft.Network"
                      "/publicIPAddresses/%s" % (
                          self.subscription_id,
                          self.resource_group.name,
                          self.public_ip.name),
            }
        }]
    }
    nic = self.network_client.network_interfaces.create_or_update(
        self.resource_group.name, nic_name, nic_params)
    return nic.result()

def shutdown(self, wait=True):
    """Shutdown instance."""
    LOG.debug('stopping instance %s', self.instance.id)
    self.instance.stop()

    if wait:
        self.instance.wait_until_stopped()
        self.instance.reload()

def collect_console(instance, base_dir):
    """Collect instance console log."""
    LOG.debug('getting console log')
    try:
        data = instance.console_log()
    except NotImplementedError:
        data = b'instance.console_log: not implemented'
    with open(os.path.join(base_dir, 'console.log'), "wb") as fp:
        fp.write(data)

def _create_internet_gateway(self):
    """Create Internet Gateway and assign to VPC."""
    LOG.debug('creating internet gateway')
    internet_gateway = self.ec2_resource.create_internet_gateway()
    internet_gateway.attach_to_vpc(VpcId=self.vpc.id)
    self._tag_resource(internet_gateway)

    return internet_gateway

def run_script(self, *args, **kwargs):
    """Run script in image, modifying image.

    @return_value: script output
    """
    LOG.debug('running script on image')
    self._instance.start()
    return self._instance.run_script(*args, **kwargs)

def _create_resource_group(self):
    """Create resource group."""
    LOG.debug('creating resource group')
    resource_group_name = self.tag
    resource_group_params = {'location': self.location}
    resource_group = self.resource_client.resource_groups.create_or_update(
        resource_group_name, resource_group_params)
    return resource_group

def azure_location_to_simplestreams_region(self):
    """Convert location to simplestreams region."""
    location = self.location.lower().replace(' ', '')
    LOG.debug('finding location %s using simple streams', location)
    regions_file = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'regions.json')
    region_simplestreams_map = c_util.load_json(
        c_util.load_file(regions_file))
    return region_simplestreams_map.get(location, location)

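# azure_location_to_simplestreams_region() above looks up the normalized
# location (lowercased, spaces stripped) in regions.json. A hypothetical
# illustration of the expected file shape, not the actual contents:
#
#     {"eastus": "us-east-1", "westeurope": "eu-west-1"}
#
# With that mapping, a location of 'East US' normalizes to 'eastus' and maps
# to 'us-east-1'; unmapped locations fall back to the normalized location
# string itself via region_simplestreams_map.get(location, location).
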
def destroy(self):
    """Delete the instance used to create a custom image."""
    if self._img_instance:
        LOG.debug('terminating backing instance %s',
                  self._img_instance.instance.instance_id)
        self._img_instance.instance.terminate()
        self._img_instance.instance.wait_until_terminated()

    super(EC2Image, self).destroy()

def __enter__(self):
    """Create tempdir.

    @return_value: tempdir path
    """
    if not self.tmpdir:
        self.tmpdir = mkdtemp(prefix=self.prefix)
    LOG.debug('using tmpdir: %s', self.tmpdir)
    return self.tmpdir

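# __enter__ above creates the tempdir lazily; a minimal sketch of the
# matching __exit__ for this context manager, assuming the directory should
# be removed on exit (the real class may instead retain it for debugging):
def __exit__(self, etype, value, trace):
    """Remove tempdir if one was created."""
    if self.tmpdir:
        shutil.rmtree(self.tmpdir)
        self.tmpdir = None
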
def _create_subnet(self):
    """Create sub-network."""
    LOG.debug('creating subnet')
    subnet_name = '%s-subnet' % self.resource_group.name
    subnet_params = {'address_prefix': '10.0.0.0/24'}
    subnet = self.network_client.subnets.create_or_update(
        self.resource_group.name, self.vnet.name, subnet_name,
        subnet_params)
    return subnet.result()

def run_script(args, image):
    """Run a script in the target image.

    @param args: cmdline arguments, must contain --script
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    msg = 'run setup image script in target image'
    LOG.debug(msg)
    image.run_script(args.script, description=msg)

def _create_security_group(self):
    """Enables ingress to default VPC security group."""
    LOG.debug('creating security group')
    security_group = self.vpc.create_security_group(
        GroupName=self.tag, Description='integration test security group')
    security_group.authorize_ingress(
        IpProtocol='-1', FromPort=-1, ToPort=-1, CidrIp='0.0.0.0/0')
    self._tag_resource(security_group)

    return security_group

def destroy(self):
    """Clean up instance."""
    if self.instance:
        LOG.debug('destroying instance %s', self.instance.id)
        self.instance.terminate()
        self.instance.wait_until_terminated()

    self._ssh_close()

    super(EC2Instance, self).destroy()

def destroy(self):
    """Delete the instance used to create a custom image."""
    if self._img_instance:
        LOG.debug('Deleting backing instance %s',
                  self._img_instance.vm_name)
        delete_vm = self.platform.compute_client.virtual_machines.delete(
            self.platform.resource_group.name, self._img_instance.vm_name)
        delete_vm.wait()

    super(AzureCloudImage, self).destroy()

def run_script(args, image):
    """Run a script in the target image.

    @param args: cmdline arguments, must contain --script
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    # TODO: get exit status back from script and add error handling here
    LOG.debug('running setup image script in target image')
    image.run_script(args.script)

def destroy(self):
    """Delete VM and close all connections."""
    if self.instance:
        LOG.debug('destroying instance: %s', self.image_id)
        vm_delete = self.platform.compute_client.virtual_machines.delete(
            self.platform.resource_group.name, self.image_id)
        vm_delete.wait()

    self._ssh_close()

    super(AzureCloudInstance, self).destroy()

def _create_public_ip_address(self):
    """Create public ip address."""
    LOG.debug('creating public ip address')
    public_ip_name = '%s-ip' % self.resource_group.name
    public_ip_params = {
        'location': self.location,
        'public_ip_allocation_method': 'Dynamic'
    }
    ip = self.network_client.public_ip_addresses.create_or_update(
        self.resource_group.name, public_ip_name, public_ip_params)
    return ip.result()

def destroy(self):
    """Deregister the backing AMI."""
    if self.delete_on_destroy:
        image = self.platform.ec2_resource.Image(self.image_ami)
        snapshot_id = image.block_device_mappings[0]['Ebs']['SnapshotId']

        LOG.debug('removing custom ami %s', self.image_ami)
        self.platform.ec2_client.deregister_image(ImageId=self.image_ami)

        LOG.debug('removing custom snapshot %s', snapshot_id)
        self.platform.ec2_client.delete_snapshot(SnapshotId=snapshot_id)

def collect_script(instance, base_dir, script, script_name):
    """Collect script data.

    @param instance: instance to run script on
    @param base_dir: base directory for output data
    @param script: script contents
    @param script_name: name of script to run
    @return_value: None, may raise errors
    """
    LOG.debug('running collect script: %s', script_name)
    util.write_file(os.path.join(base_dir, script_name),
                    instance.run_script(script))

def _create_vnet(self):
    """Create virtual network."""
    LOG.debug('creating vnet')
    vnet_name = '%s-vnet' % self.resource_group.name
    vnet_params = {
        'location': self.location,
        'address_space': {
            'address_prefixes': ['10.0.0.0/16']
        }
    }
    vnet = self.network_client.virtual_networks.create_or_update(
        self.resource_group.name, vnet_name, vnet_params)
    return vnet.result()

def destroy(self):
    """Clean up instance."""
    LOG.debug("%s: deleting container.", self)
    self.unfreeze()
    self.shutdown()
    self.pylxd_container.delete(wait=True)
    self._pylxd_container = None

    if self.platform.container_exists(self.name):
        raise OSError('%s: container was not properly removed' % self)
    if self._console_log_file and os.path.exists(self._console_log_file):
        os.unlink(self._console_log_file)
    shutil.rmtree(self.tmpd)
    super(LXDInstance, self).destroy()

def collect(args):
    """Entry point for collection.

    @param args: cmdline arguments
    @return_value: fail count
    """
    (res, failed) = run_stage(
        'collect data', [partial(collect_platform, args, platform_name)
                         for platform_name in args.platform])

    LOG.debug('collect stages: %s', res)
    if args.result:
        util.merge_results({'collect_stages': res}, args.result)

    return failed

def collect_console(instance, base_dir):
    """Collect instance console log.

    @param instance: instance to get console log for
    @param base_dir: directory to write console log to
    """
    logfile = os.path.join(base_dir, 'console.log')
    LOG.debug('getting console log for %s to %s', instance.name, logfile)
    try:
        data = instance.console_log()
    except NotImplementedError as e:
        # args[0] is hacky, but that's all I see to get at the message.
        data = b'NotImplementedError:' + e.args[0].encode()
    with open(logfile, "wb") as fp:
        fp.write(data)

def verify_data(data_dir, platform, os_name, tests):
    """Verify test data is correct.

    @param data_dir: top level directory for all tests
    @param platform: the platform name for this test data (e.g. lxd)
    @param os_name: the operating system under test (xenial, artful, etc.)
    @param tests: list of test names
    @return_value: {<test_name>: {passed: True/False, failures: []}}
    """
    base_dir = os.sep.join((data_dir, platform, os_name))
    runner = unittest2.TextTestRunner(verbosity=util.current_verbosity())
    res = {}
    for test_name in tests:
        LOG.debug('verifying test data for %s', test_name)

        # get cloudconfig for test
        test_conf = config.load_test_config(test_name)
        test_module = config.name_to_module(test_name)
        cloud_conf = test_conf['cloud_config']

        # load script outputs
        data = {'platform': platform, 'os_name': os_name}
        test_dir = os.path.join(base_dir, test_name)
        for script_name in os.listdir(test_dir):
            with open(os.path.join(test_dir, script_name), 'rb') as fp:
                data[script_name] = fp.read()

        # get test suite and launch tests
        suite = testcases.get_suite(test_module, data, cloud_conf)
        suite_results = runner.run(suite)
        res[test_name] = {
            'passed': suite_results.wasSuccessful(),
            'failures': [{'module': type(test_class).__base__.__module__,
                          'class': type(test_class).__base__.__name__,
                          'function': str(test_class).split()[0],
                          'error': trace.splitlines()[-1],
                          'traceback': trace}
                         for test_class, trace in suite_results.failures]
        }

        for failure in res[test_name]['failures']:
            LOG.warning('test case: %s failed %s.%s with: %s',
                        test_name, failure['class'], failure['function'],
                        failure['error'])

    return res

def enable_ppa(args, image):
    """Enable a ppa in the target image.

    @param args: cmdline arguments, must contain --ppa
    @param image: cloud_tests.image instance to operate on
    @return_value: None, may raise errors
    """
    # ppa only supported on ubuntu (maybe debian?)
    if image.properties['os'].lower() != 'ubuntu':
        raise NotImplementedError(
            'enabling a ppa is only available on ubuntu')

    # add ppa with add-apt-repository and update
    ppa = 'ppa:{}'.format(args.ppa)
    msg = 'enable ppa: "{}" in target'.format(ppa)
    LOG.debug(msg)
    cmd = 'add-apt-repository --yes {} && apt-get update'.format(ppa)
    image.execute(cmd, description=msg)

def console_log_method(self):
    """Determine the console log collection method, caching the result."""
    if self._console_log_method is not None:
        return self._console_log_method

    client = which('lxc')
    if not client:
        raise PlatformError("No 'lxc' client.")
    elif _has_proper_console_support():
        self._console_log_method = 'show-log'
    elif client.startswith("/snap"):
        self._console_log_method = 'logfile-snap'
    else:
        self._console_log_method = 'logfile-tmp'

    LOG.debug("Set console log method to %s", self._console_log_method)
    return self._console_log_method

def upgrade(args, image):
    """Upgrade or install cloud-init from repo.

    @param args: cmdline arguments
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    os_family = util.get_os_family(image.properties['os'])
    if os_family == 'debian':
        cmd = 'apt-get update && apt-get install cloud-init --yes'
    elif os_family == 'redhat':
        cmd = 'sleep 10 && yum install cloud-init --assumeyes'
    else:
        raise NotImplementedError('upgrade command not configured for distro '
                                  'from family: {}'.format(os_family))

    msg = 'upgrading cloud-init'
    LOG.debug(msg)
    image.execute(cmd, description=msg)

def upgrade_full(args, image):
    """Run the system's full upgrade command.

    @param args: cmdline arguments
    @param image: cloud_tests.images instance to operate on
    @return_value: None, may raise errors
    """
    os_family = util.get_os_family(image.properties['os'])
    if os_family == 'debian':
        cmd = 'apt-get update && apt-get upgrade --yes'
    elif os_family == 'redhat':
        cmd = 'yum upgrade --assumeyes'
    else:
        raise NotImplementedError('upgrade command not configured for distro '
                                  'from family: {}'.format(os_family))

    msg = 'full system upgrade'
    LOG.debug(msg)
    image.execute(cmd, description=msg)

def collect_snapshot(args, image, os_name):
    """Collect data for snapshot of image.

    @param args: cmdline arguments
    @param image: instantiated image with set up complete
    @return_value: tuple of results and fail count
    """
    res = ({}, 1)

    component = PlatformComponent(partial(platforms.get_snapshot, image))

    LOG.debug('creating snapshot for %s', os_name)
    with component as snapshot:
        LOG.info('collecting test data for os: %s', os_name)
        res = run_stage(
            'collect test data for {}'.format(os_name),
            [partial(collect_test_data, args, snapshot, os_name, test_name)
             for test_name in args.test_config])

    return res

def setup_image(args, image):
    """Set up image as specified in args.

    @param args: cmdline arguments
    @param image: cloud_tests.image instance to operate on
    @return_value: tuple of results and fail count
    """
    # update the args if necessary for this image
    overrides = image.setup_overrides
    LOG.debug('updating args for setup with: %s', overrides)
    args = util.update_args(args, overrides, preserve_old=True)

    # mapping of setup cmdline arg name to setup function
    # represented as a tuple rather than a dict or odict as lookup by name not
    # needed, and order is important as --script and --upgrade go at the end
    handlers = (
        # arg   handler      description
        ('deb', install_deb, 'setup func for --deb, install deb'),
        ('rpm', install_rpm, 'setup func for --rpm, install rpm'),
        ('repo', enable_repo, 'setup func for --repo, enable repo'),
        ('ppa', enable_ppa, 'setup func for --ppa, enable ppa'),
        ('script', run_script, 'setup func for --script, run script'),
        ('upgrade', upgrade, 'setup func for --upgrade, upgrade cloud-init'),
        ('upgrade-full', upgrade_full, 'setup func for --upgrade-full'),
    )

    # determine which setup functions needed
    calls = [partial(stage.run_single, desc, partial(func, args, image))
             for name, func, desc in handlers if getattr(args, name, None)]

    try:
        data = yaml.load(
            image.read_data("/etc/cloud/build.info", decode=True))
        info = ' '.join(["%s=%s" % (k, data.get(k))
                         for k in ("build_name", "serial") if k in data])
    except Exception as e:
        info = "N/A (%s)" % e

    LOG.info('setting up %s (%s)', image, info)
    res = stage.run_stage(
        'set up for {}'.format(image), calls, continue_after_error=False)

    return res

def enable_repo(args, image):
    """Enable a repository in the target image.

    @param args: cmdline arguments, must contain --repo
    @param image: cloud_tests.image instance to operate on
    @return_value: None, may raise errors
    """
    # find enable repo command for the distro
    os_family = util.get_os_family(image.properties['os'])
    if os_family == 'debian':
        cmd = ('echo "{}" >> "/etc/apt/sources.list" '.format(args.repo) +
               '&& apt-get update')
    elif os_family == 'centos':
        cmd = 'yum-config-manager --add-repo="{}"'.format(args.repo)
    else:
        raise NotImplementedError('enable repo command not configured for '
                                  'distro from family: {}'.format(os_family))

    msg = 'enable repo: "{}" in target'.format(args.repo)
    LOG.debug(msg)
    image.execute(cmd, description=msg)

def shutdown(self, wait=True, retry=1):
    """Shutdown instance."""
    if self.pylxd_container.status == 'Stopped':
        return

    try:
        LOG.debug("%s: shutting down (wait=%s)", self, wait)
        self.pylxd_container.stop(wait=wait)
    except (pylxd_exc.LXDAPIException, pylxd_exc.NotFound) as e:
        # An exception happens here sometimes (LP: #1783198)
        # LOG it, and try again.
        LOG.warning(
            ("%s: shutdown(retry=%d) caught %s in shutdown "
             "(response=%s): %s"),
            self, retry, e.__class__.__name__, e.response, e)
        if isinstance(e, pylxd_exc.NotFound):
            LOG.debug("container_exists(%s) == %s",
                      self.name, self.platform.container_exists(self.name))
        if retry == 0:
            raise e
        return self.shutdown(wait=wait, retry=retry - 1)

def collect_script(instance, base_dir, script, script_name):
    """Collect script data.

    @param instance: instance to run script on
    @param base_dir: base directory for output data
    @param script: script contents
    @param script_name: name of script to run
    @return_value: None, may raise errors
    """
    LOG.debug('running collect script: %s', script_name)
    (out, err, exit) = instance.run_script(
        script.encode(), rcs=False,
        description='collect: {}'.format(script_name))
    if err:
        LOG.debug("collect script %s exited '%s' and had stderr: %s",
                  script_name, exit, err)
    if not isinstance(out, bytes):
        raise util.PlatformError(
            "Collection of '%s' returned type %s, expected bytes: %s" %
            (script_name, type(out), out))

    c_util.write_file(os.path.join(base_dir, script_name), out)

def verify(args):
    """Verify test data.

    @param args: directory of test data
    @return_value: 0 for success, or number of failed tests
    """
    failed = 0
    res = {}

    # find test data
    tests = util.list_test_data(args.data_dir)

    for platform in tests.keys():
        res[platform] = {}
        for os_name in tests[platform].keys():
            test_name = "platform='{}', os='{}'".format(platform, os_name)
            LOG.info('test: %s verifying test data', test_name)

            # run test
            res[platform][os_name] = verify_data(
                args.data_dir, platform, os_name,
                tests[platform][os_name])

            # handle results
            fail_list = [k for k, v in res[platform][os_name].items()
                         if not v.get('passed')]
            if len(fail_list) == 0:
                LOG.info('test: %s passed all tests', test_name)
            else:
                LOG.warning('test: %s failed %s tests', test_name,
                            len(fail_list))
                failed += len(fail_list)

    # dump results
    LOG.debug('\n---- Verify summarized results:\n%s', format_results(res))
    if args.result:
        util.merge_results({'verify': res}, args.result)

    return failed

def collect_platform(args, platform_name):
    """Collect data for platform.

    @param args: cmdline arguments
    @param platform_name: platform to collect for
    @return_value: tuple of results and fail count
    """
    res = ({}, 1)

    platform_config = config.load_platform_config(
        platform_name, require_enabled=True)
    platform_config['data_dir'] = args.data_dir
    LOG.debug('platform config: %s', platform_config)
    component = PlatformComponent(
        partial(platforms.get_platform, platform_name, platform_config))

    LOG.info('setting up platform: %s', platform_name)
    with component as platform:
        res = run_stage('collect for platform: {}'.format(platform_name),
                        [partial(collect_image, args, platform, os_name)
                         for os_name in args.os_name])

    return res

def _ssh_connect(self):
    """Connect via SSH.

    Attempt to SSH to the client on the specific IP and port. If it
    fails in some manner, then retry 2 more times for a total of 3
    attempts; sleeping a few seconds between attempts.
    """
    if self._ssh_client:
        return self._ssh_client

    if not self.ssh_ip or not self.ssh_port:
        raise ValueError("Cannot ssh_connect, ssh_ip=%s ssh_port=%s" %
                         (self.ssh_ip, self.ssh_port))

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file)

    retries = 3
    while retries:
        try:
            client.connect(username=self.ssh_username,
                           hostname=self.ssh_ip, port=self.ssh_port,
                           pkey=private_key)
            self._ssh_client = client
            return client
        except (ConnectionRefusedError, AuthenticationException,
                BadHostKeyException, ConnectionResetError, SSHException,
                OSError):
            retries -= 1
            LOG.debug('Retrying ssh connection on connect failure')
            time.sleep(3)

    ssh_cmd = 'Failed ssh connection to %s@%s:%s after 3 retries' % (
        self.ssh_username, self.ssh_ip, self.ssh_port
    )
    raise util.InTargetExecuteError(b'', b'', 1, ssh_cmd, 'ssh')

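# Several destroy()/shutdown() paths above call _ssh_close(), the counterpart
# to _ssh_connect(). A minimal sketch, assuming it only needs to drop the
# cached paramiko client (the real method's error handling may differ):
def _ssh_close(self):
    """Close and discard the cached ssh client, if any."""
    if self._ssh_client:
        try:
            self._ssh_client.close()
        except SSHException:
            LOG.warning('Failed to close ssh connection')
        self._ssh_client = None
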
def _has_proper_console_support():
    """Check whether the lxc client and LXD server support console logs."""
    stdout, _ = subp(['lxc', 'info'])
    info = load_yaml(stdout)
    reason = None
    if 'console' not in info.get('api_extensions', []):
        reason = "LXD server does not support console api extension"
    else:
        dver = str(info.get('environment', {}).get('driver_version', ""))
        if dver.startswith("2.") or dver.startswith("1."):
            reason = "LXD Driver version not 3.x+ (%s)" % dver
        else:
            try:
                stdout = subp(['lxc', 'console', '--help'],
                              decode=False)[0]
                if not (b'console' in stdout and b'log' in stdout):
                    reason = "no '--log' in lxc console --help"
            except ProcessExecutionError:
                reason = "no 'console' command in lxc client"

    if reason:
        LOG.debug("no console-support: %s", reason)
        return False
    else:
        LOG.debug("console-support looks good")
        return True

def start(self, wait=True, wait_for_cloud_init=False):
    """Start instance on EC2 with the platform's VPC."""
    if self.instance:
        if self.instance.state['Name'] == 'running':
            return

        LOG.debug('starting instance %s', self.instance.id)
        self.instance.start()
    else:
        LOG.debug('launching instance')

        args = {
            'ImageId': self.image_ami,
            'InstanceType': self.platform.instance_type,
            'KeyName': self.platform.key_name,
            'MaxCount': 1,
            'MinCount': 1,
            'SecurityGroupIds': [self.platform.security_group.id],
            'SubnetId': self.platform.subnet.id,
            'TagSpecifications': [{
                'ResourceType': 'instance',
                'Tags': [{
                    'Key': 'Name', 'Value': self.platform.tag
                }]
            }],
        }

        if self.user_data:
            args['UserData'] = self.user_data

        try:
            instances = self.platform.ec2_resource.create_instances(**args)
        except botocore.exceptions.ClientError as error:
            error_msg = error.response['Error']['Message']
            raise util.PlatformError('start', error_msg)

        self.instance = instances[0]

    LOG.debug('instance id: %s', self.instance.id)
    if wait:
        self.instance.wait_until_running()
        self.instance.reload()
        self.ssh_ip = self.instance.public_ip_address
        self._wait_for_system(wait_for_cloud_init)

def build_deb(args, instance):
    """Build deb on system and copy out to location at args.deb.

    @param args: cmdline arguments
    @return_value: tuple of results and fail count
    """
    # update remote system package list and install build deps
    LOG.debug('installing pre-reqs')
    pkgs = ' '.join(pre_reqs)
    instance.execute(
        'apt-get update && apt-get install --yes {}'.format(pkgs))

    # local tmpfile that must be deleted
    local_tarball = tempfile.NamedTemporaryFile().name

    # paths to use in remote system
    output_link = '/root/cloud-init_all.deb'
    remote_tarball = _out(instance.execute(['mktemp']))
    extract_dir = '/root'
    bddeb_path = os.path.join(extract_dir, 'packages', 'bddeb')
    git_env = {'GIT_DIR': os.path.join(extract_dir, '.git'),
               'GIT_WORK_TREE': extract_dir}

    LOG.debug('creating tarball of cloud-init at: %s', local_tarball)
    c_util.subp(['tar', 'cf', local_tarball, '--owner', 'root',
                 '--group', 'root', '-C', args.cloud_init, '.'])

    LOG.debug('copying to remote system at: %s', remote_tarball)
    instance.push_file(local_tarball, remote_tarball)

    LOG.debug('extracting tarball in remote system at: %s', extract_dir)
    instance.execute(['tar', 'xf', remote_tarball, '-C', extract_dir])
    instance.execute(['git', 'commit', '-a', '-m', 'tmp', '--allow-empty'],
                     env=git_env)

    LOG.debug('installing deps')
    deps_path = os.path.join(extract_dir, 'tools', 'read-dependencies')
    instance.execute([deps_path, '--install', '--test-distro',
                      '--distro', 'ubuntu', '--python-version', '3'])

    LOG.debug('building deb in remote system at: %s', output_link)
    bddeb_args = args.bddeb_args.split() if args.bddeb_args else []
    instance.execute([bddeb_path, '-d'] + bddeb_args, env=git_env)

    # copy the deb back to the host system
    LOG.debug('copying built deb to host at: %s', args.deb)
    instance.pull_file(output_link, args.deb)

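# build_deb() above references pre_reqs and _out(), which are defined
# elsewhere in the module. Hedged sketches of plausible definitions (the
# actual package list and helper may differ):
pre_reqs = ['devscripts', 'equivs', 'git', 'tar']


def _out(cmd_output):
    """Get stripped stdout from an execute() result tuple."""
    return cmd_output[0].strip()
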
def collect_test_data(args, snapshot, os_name, test_name):
    """Collect data for test case.

    @param args: cmdline arguments
    @param snapshot: instantiated snapshot
    @param test_name: name or path of test to run
    @return_value: tuple of results and fail count
    """
    res = ({}, 1)

    # load test config
    test_name_in = test_name
    test_name = config.path_to_name(test_name)
    test_config = config.load_test_config(test_name)
    user_data = test_config['cloud_config']
    test_scripts = test_config['collect_scripts']
    test_output_dir = os.sep.join(
        (args.data_dir, snapshot.platform_name, os_name, test_name))

    # if test is not enabled, skip and return 0 failures
    if not test_config.get('enabled', False):
        LOG.warning('test config %s is not enabled, skipping', test_name)
        return ({}, 0)

    test_class = get_test_class(
        config.name_to_module(test_name_in),
        test_data={'platform': snapshot.platform_name, 'os_name': os_name},
        test_conf=test_config['cloud_config'])
    try:
        test_class.maybeSkipTest()
    except base.SkipTest as s:
        LOG.warning('skipping test config %s: %s', test_name, s)
        return ({}, 0)

    # if testcase requires a feature flag that the image does not support,
    # skip the testcase with a warning
    req_features = test_config.get('required_features', [])
    if any(feature not in snapshot.features for feature in req_features):
        LOG.warning('test config %s requires features not supported by '
                    'image, skipping.\nrequired features: %s\nsupported '
                    'features: %s', test_name, req_features,
                    snapshot.features)
        return ({}, 0)

    # if there are user data overrides required for this test case, apply them
    overrides = snapshot.config.get('user_data_overrides', {})
    if overrides:
        LOG.debug('updating user data for collect with: %s', overrides)
        user_data = util.update_user_data(user_data, overrides)

    # create test instance
    component = PlatformComponent(
        partial(platforms.get_instance, snapshot, user_data,
                block=True, start=False, use_desc=test_name),
        preserve_instance=args.preserve_instance)

    LOG.info('collecting test data for test: %s', test_name)
    with component as instance:
        start_call = partial(run_single, 'boot instance', partial(
            instance.start, wait=True, wait_for_cloud_init=True))
        collect_calls = [partial(run_single, 'script {}'.format(script_name),
                                 partial(collect_script, instance,
                                         test_output_dir, script,
                                         script_name))
                         for script_name, script in test_scripts.items()]

        res = run_stage('collect for test: {}'.format(test_name),
                        [start_call] + collect_calls)
        instance.shutdown()
        collect_console(instance, test_output_dir)

    return res