def validate_manifest(data, validator, error):
    """Validate the EC2 manifest.

    :param dict data: The data of the manifest
    :param function validator: The function that validates the manifest given the data and a path
    :param function error: The function that raises an error when the validation fails
    """
    validator(data, rel_path(__file__, 'manifest-schema.yml'))

    from bootstrapvz.common.bytes import Bytes
    if data['volume']['backing'] == 'ebs':
        # Sum the size of all partitions (every key except the 'type' marker)
        volume_size = Bytes(0)
        for key, partition in data['volume']['partitions'].iteritems():
            if key != 'type':
                volume_size += Bytes(partition['size'])
        if int(volume_size % Bytes('1GiB')) != 0:
            msg = ('The volume size must be a multiple of 1GiB when using EBS backing')
            error(msg, ['volume', 'partitions'])
    else:
        # S3 backed AMIs have additional schema constraints
        validator(data, rel_path(__file__, 'manifest-schema-s3.yml'))

    bootloader = data['system']['bootloader']
    virtualization = data['provider']['virtualization']
    backing = data['volume']['backing']
    partition_type = data['volume']['partitions']['type']
    enhanced_networking = data['provider']['enhanced_networking'] if 'enhanced_networking' in data['provider'] else None

    if virtualization == 'pvm' and bootloader != 'pvgrub':
        error('Paravirtualized AMIs only support pvgrub as a bootloader', ['system', 'bootloader'])
    if backing != 'ebs' and virtualization == 'hvm':
        error('HVM AMIs currently only work when they are EBS backed', ['volume', 'backing'])
    if backing == 's3' and partition_type != 'none':
        # Bugfix: point the error at the offending setting instead of the
        # unrelated ['system', 'bootloader'] path (copy-paste from the check above).
        error('S3 backed AMIs currently only work with unpartitioned volumes', ['volume', 'partitions', 'type'])
    if enhanced_networking == 'simple' and virtualization != 'hvm':
        error('Enhanced networking only works with HVM virtualization', ['provider', 'virtualization'])
def run(cls, info):
    """Report a validation error for every file_copy source that is missing."""
    from bootstrapvz.common.tools import rel_path
    for index, entry in enumerate(info.manifest.plugins['file_copy']['files']):
        source = rel_path(info.manifest.path, entry['src'])
        if os.path.exists(source):
            continue
        info.manifest.validation_error('The source file %s does not exist.' % entry['src'],
                                       ['plugins', 'file_copy', 'files', index])
def run(cls, info): from bootstrapvz.common.tools import log_check_call # Extract playbook and directory playbook = rel_path(info.manifest.path, info.manifest.plugins['ansible']['playbook']) # build the inventory file inventory = os.path.join(info.root, 'tmp/bootstrap-inventory') with open(inventory, 'w') as handle: conn = '{} ansible_connection=chroot'.format(info.root) content = "" if 'groups' in info.manifest.plugins['ansible']: for group in info.manifest.plugins['ansible']['groups']: content += '[{}]\n{}\n'.format(group, conn) else: content = conn handle.write(content) # build the ansible command cmd = ['ansible-playbook', '-i', inventory, playbook] if 'extra_vars' in info.manifest.plugins['ansible']: cmd.extend(['--extra-vars', json.dumps(info.manifest.plugins['ansible']['extra_vars'])]) if 'tags' in info.manifest.plugins['ansible']: cmd.extend(['--tags', ','.join(info.manifest.plugins['ansible']['tags'])]) if 'skip_tags' in info.manifest.plugins['ansible']: cmd.extend(['--skip-tags', ','.join(info.manifest.plugins['ansible']['skip_tags'])]) if 'opt_flags' in info.manifest.plugins['ansible']: # Should probably do proper validation on these, but I don't think it should be used very often. cmd.extend(info.manifest.plugins['ansible']['opt_flags']) # Run and remove the inventory file log_check_call(cmd) os.remove(inventory)
def run(cls, info):
    """Install every configured keyring into apt's trusted.gpg.d directory."""
    from shutil import copy
    for relative_key in info.manifest.packages['trusted-keys']:
        source = rel_path(info.manifest.path, relative_key)
        target = os.path.join(info.root, 'etc/apt/trusted.gpg.d', os.path.basename(source))
        copy(source, target)
def run(cls, info):
    """Validate each trusted key: it must exist and be a readable GPG keyring.

    Reports a manifest validation error pointing at the offending list entry.
    """
    from bootstrapvz.common.tools import log_call
    from tempfile import mkdtemp
    from shutil import rmtree
    # Default to an empty list (not {}): 'trusted-keys' is a list in the manifest.
    for i, rel_key_path in enumerate(info.manifest.packages.get('trusted-keys', [])):
        key_path = rel_path(info.manifest.path, rel_key_path)
        if not os.path.isfile(key_path):
            info.manifest.validation_error('File not found: {}'.format(key_path),
                                           ['packages', 'trusted-keys', i])
        tempdir = mkdtemp()
        try:
            # Ask gpg to list the keyring; non-zero status means it is not a valid keyring
            status, _, _ = log_call(
                ['gpg', '--quiet', '--homedir', tempdir, '--keyring', key_path, '-k']
            )
        finally:
            # Always clean up the scratch homedir, even if log_call raises
            rmtree(tempdir)
        if status != 0:
            info.manifest.validation_error('Invalid GPG keyring: {}'.format(key_path),
                                           ['packages', 'trusted-keys', i])
def pick_build_server(build_servers, manifest, preferences={}):
    """Select a build server able to bootstrap the manifest's provider/backing.

    :param dict build_servers: Mapping of build server names to their settings
    :param dict manifest: The manifest data (determines what must be bootstrapped)
    :param dict preferences: Optional constraints on server name and release
    :return: A LocalBuildServer or RemoteBuildServer instance
    :raises Exception: When no configured server matches the preferences
    """
    # Validate the build servers list
    from bootstrapvz.common.tools import load_data, rel_path
    schema = load_data(rel_path(__file__, 'build-servers-schema.yml'))
    import jsonschema
    jsonschema.validate(build_servers, schema)
    # EC2 requires a server capable of the specific backing (ebs/s3)
    if manifest['provider']['name'] == 'ec2':
        must_bootstrap = 'ec2-' + manifest['volume']['backing']
    else:
        must_bootstrap = manifest['provider']['name']

    def matches(name, settings):
        # Reject servers whose name/release conflict with explicit preferences;
        # absent preferences default to the server's own values (always match)
        if preferences.get('name', name) != name:
            return False
        if preferences.get('release', settings['release']) != settings['release']:
            return False
        if must_bootstrap not in settings['can_bootstrap']:
            return False
        return True

    for name, settings in build_servers.iteritems():
        if not matches(name, settings):
            continue
        if settings['type'] == 'local':
            from local import LocalBuildServer
            return LocalBuildServer(name, settings)
        else:
            from remote import RemoteBuildServer
            return RemoteBuildServer(name, settings)
    raise Exception('Unable to find a build server that matches your preferences.')
def run(cls, info):
    """Fail early if the configured guest additions ISO is missing."""
    guest_additions_path = rel_path(info.manifest.path,
                                    info.manifest.provider['guest_additions'])
    if os.path.exists(guest_additions_path):
        return
    raise TaskError('The file {file} does not exist.'.format(file=guest_additions_path))
def run(cls, info):
    """Install the VirtualBox guest additions inside the chroot.

    Determines the target kernel version from the headers package, loop-mounts
    the guest additions ISO and runs a wrapper around VBoxLinuxAdditions.run.
    """
    from bootstrapvz.common.tools import log_call, log_check_call
    for line in log_check_call(['chroot', info.root, 'apt-cache', 'show', info.kernel['headers_pkg']]):
        # Bugfix: split on the first colon only — apt-cache field values
        # (e.g. Homepage URLs) may themselves contain colons, which would
        # make the two-element unpacking raise ValueError.
        key, value = line.split(':', 1)
        if key.strip() == 'Depends':
            kernel_version = value.strip().split('linux-headers-')[-1]
            break
    guest_additions_path = rel_path(info.manifest.path, info.manifest.provider['guest_additions'])
    # Loop-mount the ISO inside the chroot
    mount_dir = 'mnt/guest_additions'
    mount_path = os.path.join(info.root, mount_dir)
    os.mkdir(mount_path)
    root = info.volume.partition_map.root
    root.add_mount(guest_additions_path, mount_path, ['-o', 'loop'])
    install_script = os.path.join('/', mount_dir, 'VBoxLinuxAdditions.run')
    # Render the install wrapper with the kernel/arch specifics
    install_wrapper_name = 'install_guest_additions.sh'
    install_wrapper = open(os.path.join(assets, install_wrapper_name)) \
        .read() \
        .replace("KERNEL_VERSION", kernel_version) \
        .replace("KERNEL_ARCH", info.kernel['arch']) \
        .replace("INSTALL_SCRIPT", install_script)
    install_wrapper_path = os.path.join(info.root, install_wrapper_name)
    with open(install_wrapper_path, 'w') as f:
        f.write(install_wrapper + '\n')
    # Don't check the return code of the scripts here, because 1 not necessarily means they have failed
    log_call(['chroot', info.root, 'bash', '/' + install_wrapper_name])
    # VBoxService process could be running, as it is not affected by DisableDaemonAutostart
    log_call(['chroot', info.root, 'service', 'vboxadd-service', 'stop'])
    # Clean up: unmount the ISO and remove the wrapper script
    root.remove_mount(mount_path)
    os.rmdir(mount_path)
    os.remove(install_wrapper_path)
def run(cls, info):
    """Add the kernel package matching this release and architecture."""
    packages_path = rel_path(__file__, 'packages-kernels.yml')
    lookup = [info.manifest.release.codename, info.manifest.system['architecture']]
    info.packages.add(config_get(packages_path, lookup))
def run(cls, info):
    """Validate each trusted key: it must exist and be a readable GPG keyring.

    Reports a manifest validation error pointing at the offending list entry.
    """
    from bootstrapvz.common.tools import log_call
    from tempfile import mkdtemp
    from shutil import rmtree
    # Default to an empty list (not {}): 'trusted-keys' is a list in the manifest.
    for i, rel_key_path in enumerate(
            info.manifest.packages.get('trusted-keys', [])):
        key_path = rel_path(info.manifest.path, rel_key_path)
        if not os.path.isfile(key_path):
            info.manifest.validation_error(
                'File not found: {}'.format(key_path),
                ['packages', 'trusted-keys', i])
        tempdir = mkdtemp()
        try:
            # Ask gpg to list the keyring; non-zero status means it is invalid
            status, _, _ = log_call([
                'gpg', '--quiet', '--homedir', tempdir, '--keyring', key_path, '-k'
            ])
        finally:
            # Always clean up the scratch homedir, even if log_call raises
            rmtree(tempdir)
        if status != 0:
            info.manifest.validation_error(
                'Invalid GPG keyring: {}'.format(key_path),
                ['packages', 'trusted-keys', i])
def run(cls, info):
    """Run the configured Ansible playbook against the chroot.

    Builds a chroot-connection inventory file inside the image, assembles the
    ansible-playbook command line from the plugin settings and executes it.
    """
    # Extract playbook and directory
    playbook = rel_path(info.manifest.path, info.manifest.plugins['ansible']['playbook'])
    # build the inventory file
    inventory = os.path.join(info.root, 'tmp/bootstrap-inventory')
    with open(inventory, 'w') as handle:
        # The chroot connection makes ansible operate directly on info.root
        conn = '{} ansible_connection=chroot'.format(info.root)
        content = ""
        if 'groups' in info.manifest.plugins['ansible']:
            # The same chroot host is listed under every requested group
            for group in info.manifest.plugins['ansible']['groups']:
                content += '[{}]\n{}\n'.format(group, conn)
        else:
            content = conn
        handle.write(content)
    # build the ansible command
    cmd = ['ansible-playbook', '-i', inventory, playbook]
    if 'extra_vars' in info.manifest.plugins['ansible']:
        cmd.extend(['--extra-vars', json.dumps(info.manifest.plugins['ansible']['extra_vars'])])
    if 'tags' in info.manifest.plugins['ansible']:
        cmd.extend(['--tags', ','.join(info.manifest.plugins['ansible']['tags'])])
    if 'skip_tags' in info.manifest.plugins['ansible']:
        cmd.extend(['--skip-tags', ','.join(info.manifest.plugins['ansible']['skip_tags'])])
    if 'opt_flags' in info.manifest.plugins['ansible']:
        # Should probably do proper validation on these, but I don't think it should be used very often.
        cmd.extend(info.manifest.plugins['ansible']['opt_flags'])
    # Run and remove the inventory file
    log_check_call(cmd)
    os.remove(inventory)
def validate_manifest(data, validator, error):
    """Check that the chosen vagrant provider matches the volume backing."""
    validator(data, rel_path(__file__, 'manifest-schema.yml'))
    provider = data['plugins']['vagrant'].get('provider', 'virtualbox')
    backing = data['volume']['backing']
    if provider == 'virtualbox' and backing != 'vmdk':
        error('Virtualbox vagrant boxes support vmdk images only',
              ['plugins', 'vagrant', 'provider'])
    if provider == 'libvirt' and backing != 'qcow2':
        error('Libvirt vagrant boxes support qcow2 images only',
              ['plugins', 'vagrant', 'provider'])
def pick_build_server(build_servers, manifest, preferences={}):
    """Select a build server able to bootstrap the manifest's provider/backing.

    :param dict build_servers: Mapping of build server names to their settings
    :param dict manifest: The manifest data (determines what must be bootstrapped)
    :param dict preferences: Optional constraints on server name and release
    :return: A LocalBuildServer or RemoteBuildServer instance
    :raises Exception: When no configured server matches the preferences
    """
    # Validate the build servers list
    from bootstrapvz.common.tools import load_data, rel_path
    schema = load_data(rel_path(__file__, 'build-servers-schema.yml'))
    import jsonschema
    jsonschema.validate(build_servers, schema)
    # EC2 requires a server capable of the specific backing (ebs/s3)
    if manifest['provider']['name'] == 'ec2':
        must_bootstrap = 'ec2-' + manifest['volume']['backing']
    else:
        must_bootstrap = manifest['provider']['name']

    def matches(name, settings):
        # Reject servers whose name/release conflict with explicit preferences;
        # absent preferences default to the server's own values (always match)
        if preferences.get('name', name) != name:
            return False
        if preferences.get('release', settings['release']) != settings['release']:
            return False
        if must_bootstrap not in settings['can_bootstrap']:
            return False
        return True

    for name, settings in build_servers.iteritems():
        if not matches(name, settings):
            continue
        if settings['type'] == 'local':  # pylint: disable=no-else-return
            from .local import LocalBuildServer
            return LocalBuildServer(name, settings)
        else:
            from .remote import RemoteBuildServer
            return RemoteBuildServer(name, settings)
    raise Exception('Unable to find a build server that matches your preferences.')
def validate_manifest(data, validator, error):
    """Shrinking is only implemented for vmdk-backed volumes."""
    from bootstrapvz.common.tools import rel_path
    validator(data, rel_path(__file__, 'manifest-schema.yml'))
    shrink_requested = data['plugins']['minimize_size'].get('shrink', False)
    if shrink_requested and data['volume']['backing'] != 'vmdk':
        error('Can only shrink vmdk images', ['plugins', 'minimize_size', 'shrink'])
def run(cls, info):
    """Add the kernel package matching this release and architecture."""
    from bootstrapvz.common.tools import config_get, rel_path
    packages_path = rel_path(__file__, 'packages-kernels.yml')
    info.packages.add(config_get(packages_path,
                                 [info.manifest.release.codename,
                                  info.manifest.system['architecture']]))
def get_all_tasks(loaded_modules):
    """Gets a list of all task classes in the package

    :return: A list of all tasks in the package
    :rtype: list
    """
    import pkgutil
    import os.path
    import bootstrapvz
    from bootstrapvz.common.tools import rel_path
    # Set of (module search path, import prefix) pairs covering every task location
    module_paths = set([(rel_path(bootstrapvz.__file__, 'common/tasks'), 'bootstrapvz.common.tasks.')])
    for module in loaded_modules:
        module_path = os.path.dirname(module.__file__)
        module_prefix = module.__name__ + '.'
        module_paths.add((module_path, module_prefix))
    providers = rel_path(bootstrapvz.__file__, 'providers')
    for module_loader, module_name, ispkg in pkgutil.iter_modules(
            [providers, 'bootstrapvz.providers']):
        module_path = os.path.join(module_loader.path, module_name)
        # The prefix param seems to do nothing, so we prefix the module name ourselves
        module_prefix = 'bootstrapvz.providers.{}.'.format(module_name)
        module_paths.add((module_path, module_prefix))
    plugins = rel_path(bootstrapvz.__file__, 'plugins')
    for module_loader, module_name, ispkg in pkgutil.iter_modules(
            [plugins, 'bootstrapvz.plugins']):
        module_path = os.path.join(module_loader.path, module_name)
        module_prefix = 'bootstrapvz.plugins.{}.'.format(module_name)
        module_paths.add((module_path, module_prefix))
    # Get generators that return all classes in a module
    generators = []
    for (module_path, module_prefix) in module_paths:
        generators.append(get_all_classes(module_path, module_prefix))
    import itertools
    classes = itertools.chain(*generators)

    # lambda function to check whether a class is a task (excluding the superclass Task)
    def is_task(obj):
        from task import Task
        return issubclass(obj, Task) and obj is not Task
    return filter(is_task, classes)  # Only return classes that are tasks
def run(cls, info):
    """Register the bootstrapped volume as an AMI (boto3-style parameters)."""
    registration_params = {
        'Name': info._ec2['ami_name'],
        'Description': info._ec2['ami_description']
    }
    registration_params['Architecture'] = {
        'i386': 'i386',
        'amd64': 'x86_64'
    }.get(info.manifest.system['architecture'])

    if info.manifest.volume['backing'] == 's3':
        # S3 backed AMIs register from the uploaded bundle manifest
        registration_params['ImageLocation'] = info._ec2[
            'manifest_location']
    else:
        # EBS backed: root device name depends on the virtualization type
        root_dev_name = {
            'pvm': '/dev/sda',
            'hvm': '/dev/xvda'
        }.get(info.manifest.provider['virtualization'])
        registration_params['RootDeviceName'] = root_dev_name

        block_device = [{
            'DeviceName': root_dev_name,
            'Ebs': {
                'SnapshotId': info._ec2['snapshot'],
                'VolumeSize': info.volume.size.bytes.get_qty_in('GiB'),
                'VolumeType': 'gp2',
                'DeleteOnTermination': True
            }
        }]
        registration_params['BlockDeviceMappings'] = block_device

    if info.manifest.provider['virtualization'] == 'hvm':
        registration_params['VirtualizationType'] = 'hvm'
    else:
        registration_params['VirtualizationType'] = 'paravirtual'
        # Paravirtual AMIs boot via a region/arch specific AKI (pvgrub image)
        akis_path = rel_path(__file__, 'ami-akis.yml')
        from bootstrapvz.common.tools import config_get
        registration_params['kernel_id'] = config_get(
            akis_path,
            [info._ec2['region'], info.manifest.system['architecture']])

    if info.manifest.provider.get('enhanced_networking', None) == 'simple':
        registration_params['SriovNetSupport'] = 'simple'
        registration_params['EnaSupport'] = True

    info._ec2['image'] = info._ec2['connection'].register_image(
        **registration_params)

    # Setting up tags on the AMI
    if 'tags' in info.manifest.data:
        raw_tags = info.manifest.data['tags']
        # Tag values may interpolate manifest variables via str.format
        formatted_tags = {
            k: v.format(**info.manifest_vars)
            for k, v in raw_tags.items()
        }
        tags = [{'Key': k, 'Value': v} for k, v in formatted_tags.items()]
        info._ec2['connection'].create_tags(
            Resources=[info._ec2['image']['ImageId']], Tags=tags)
def run(cls, info):
    """Ensure the configured playbook path exists and is a regular file."""
    from bootstrapvz.common.exceptions import TaskError
    playbook = rel_path(info.manifest.path, info.manifest.plugins['ansible']['playbook'])
    if not os.path.exists(playbook):
        raise TaskError('The playbook file {playbook} does not exist.'.format(playbook=playbook))
    if not os.path.isfile(playbook):
        raise TaskError('The playbook path {playbook} does not point to a file.'.format(playbook=playbook))
def validate_manifest(data, validator, error):
    """Check that the shrink tool is compatible with the volume backing."""
    from bootstrapvz.common.tools import rel_path
    validator(data, rel_path(__file__, 'manifest-schema.yml'))
    shrink_type = get_shrink_type(data['plugins'])
    backing = data['volume']['backing']
    if shrink_type == 'vmware-vdiskmanager' and backing != 'vmdk':
        error('Can only shrink vmdk images with vmware-vdiskmanager',
              ['plugins', 'minimize_size', 'shrink'])
    if shrink_type == 'qemu-img' and backing not in ('vmdk', 'vdi', 'raw', 'qcow2'):
        error('Can only shrink vmdk, vdi, raw and qcow2 images with qemu-img',
              ['plugins', 'minimize_size', 'shrink'])
def run(cls, info):
    """Add the 'file' utility plus the release/arch specific kernel package."""
    from bootstrapvz.common.tools import config_get, rel_path
    info.packages.add('file')  # Needed for the init scripts
    kernel_packages_path = rel_path(__file__, 'packages-kernels.yml')
    info.packages.add(config_get(kernel_packages_path,
                                 [info.manifest.release.codename,
                                  info.manifest.system['architecture']]))
def validate_manifest(data, validator, error):
    """Check that the shrink tool is compatible with the volume backing."""
    from bootstrapvz.common.tools import rel_path
    validator(data, rel_path(__file__, 'manifest-schema.yml'))
    shrink_type = get_shrink_type(data['plugins'])
    backing = data['volume']['backing']
    if shrink_type == 'vmware-vdiskmanager' and backing != 'vmdk':
        error('Can only shrink vmdk images with vmware-vdiskmanager',
              ['plugins', 'minimize_size', 'shrink'])
    if shrink_type == 'qemu-img' and backing not in ('vmdk', 'vdi'):
        error('Can only shrink vmdk and vdi images with qemu-img',
              ['plugins', 'minimize_size', 'shrink'])
def validate_manifest(data, validator, error):
    """Require a backports kernel pin when building a docker image for wheezy."""
    validator(data, rel_path(__file__, 'manifest-schema.yml'))
    from bootstrapvz.common.releases import get_release
    if get_release(data['system']['release']) == wheezy:
        # prefs is a generator of apt preferences across files in the manifest
        prefs = (item for vals in data.get('packages', {}).get('preferences', {}).values() for item in vals)
        if not any('linux-image' in item['package'] and 'wheezy-backports' in item['pin'] for item in prefs):
            msg = 'The backports kernel is required for the docker daemon to function properly'
            error(msg, ['packages', 'preferences'])
def run(cls, info):
    """Register the bootstrapped volume as an AMI (boto2-style parameters)."""
    registration_params = {
        'name': info._ec2['ami_name'],
        'description': info._ec2['ami_description']
    }
    registration_params['architecture'] = {
        'i386': 'i386',
        'amd64': 'x86_64'
    }.get(info.manifest.system['architecture'])

    if info.manifest.volume['backing'] == 's3':
        # S3 backed AMIs register from the uploaded bundle manifest
        registration_params['image_location'] = info._ec2[
            'manifest_location']
    else:
        # EBS backed: root device name depends on the virtualization type
        root_dev_name = {
            'pvm': '/dev/sda',
            'hvm': '/dev/xvda'
        }.get(info.manifest.provider['virtualization'])
        registration_params['root_device_name'] = root_dev_name

        from boto.ec2.blockdevicemapping import BlockDeviceType
        from boto.ec2.blockdevicemapping import BlockDeviceMapping
        block_device = BlockDeviceType(
            snapshot_id=info._ec2['snapshot'].id,
            delete_on_termination=True,
            size=info.volume.size.bytes.get_qty_in('GiB'),
            volume_type='gp2')
        registration_params['block_device_map'] = BlockDeviceMapping()
        registration_params['block_device_map'][
            root_dev_name] = block_device

    if info.manifest.provider['virtualization'] == 'hvm':
        registration_params['virtualization_type'] = 'hvm'
    else:
        registration_params['virtualization_type'] = 'paravirtual'
        # Paravirtual AMIs boot via a region/arch specific AKI (pvgrub image)
        akis_path = rel_path(__file__, 'ami-akis.yml')
        from bootstrapvz.common.tools import config_get
        registration_params['kernel_id'] = config_get(
            akis_path,
            [info._ec2['region'], info.manifest.system['architecture']])

    if info.manifest.provider.get('enhanced_networking', None) == 'simple':
        registration_params['sriov_net_support'] = 'simple'

    info._ec2['image'] = info._ec2['connection'].register_image(
        **registration_params)

    # Setting up tags on the AMI
    if 'tags' in info.manifest.data:
        raw_tags = info.manifest.data['tags']
        # Tag values may interpolate manifest variables via str.format
        tags = {
            k: v.format(**info.manifest_vars)
            for k, v in raw_tags.items()
        }
        info._ec2['connection'].create_tags(info._ec2['image'], tags)
def get_all_tasks(loaded_modules):
    """Gets a list of all task classes in the package

    :return: A list of all tasks in the package
    :rtype: list
    """
    import pkgutil
    import os.path
    import bootstrapvz
    from bootstrapvz.common.tools import rel_path
    # Set of (module search path, import prefix) pairs covering every task location
    module_paths = set([(rel_path(bootstrapvz.__file__, 'common/tasks'), 'bootstrapvz.common.tasks.')])
    for module in loaded_modules:
        module_path = os.path.dirname(module.__file__)
        module_prefix = module.__name__ + '.'
        module_paths.add((module_path, module_prefix))
    providers = rel_path(bootstrapvz.__file__, 'providers')
    for module_loader, module_name, ispkg in pkgutil.iter_modules([providers, 'bootstrapvz.providers']):
        module_path = os.path.join(module_loader.path, module_name)
        # The prefix param seems to do nothing, so we prefix the module name ourselves
        module_prefix = 'bootstrapvz.providers.{}.'.format(module_name)
        module_paths.add((module_path, module_prefix))
    plugins = rel_path(bootstrapvz.__file__, 'plugins')
    for module_loader, module_name, ispkg in pkgutil.iter_modules([plugins, 'bootstrapvz.plugins']):
        module_path = os.path.join(module_loader.path, module_name)
        module_prefix = 'bootstrapvz.plugins.{}.'.format(module_name)
        module_paths.add((module_path, module_prefix))
    # Get generators that return all classes in a module
    generators = []
    for (module_path, module_prefix) in module_paths:
        generators.append(get_all_classes(module_path, module_prefix))
    import itertools
    classes = itertools.chain(*generators)

    # lambda function to check whether a class is a task (excluding the superclass Task)
    def is_task(obj):
        from task import Task
        return issubclass(obj, Task) and obj is not Task
    return filter(is_task, classes)  # Only return classes that are tasks
def validate_manifest(data, validator, error):
    """Validate Oracle Compute Cloud provider settings."""
    from bootstrapvz.common.tools import rel_path
    validator(data, rel_path(__file__, 'manifest-schema.yml'))
    provider = data['provider']
    required = ['username', 'password', 'identity-domain']
    if 'credentials' in provider:
        if not all(key in provider['credentials'] for key in required):
            error('All Oracle Compute Cloud credentials should be specified in the manifest',
                  ['provider', 'credentials'])
    if not provider.get('container'):
        error('The container to which the image will be uploaded should be specified',
              ['provider'])
def run(cls, info):
    """Queue the base packages plus the release/arch specific kernel."""
    from bootstrapvz.common.tools import config_get, rel_path
    for package in ('openssl', 'python-openssl', 'python-pyasn1', 'sudo', 'parted'):
        info.packages.add(package)
    kernel_packages_path = rel_path(__file__, 'packages-kernels.yml')
    info.packages.add(config_get(kernel_packages_path,
                                 [info.manifest.release.codename,
                                  info.manifest.system['architecture']]))
def run(cls, info):
    """Validate the admin user's public key file, if one is configured."""
    from bootstrapvz.common.tools import log_call, rel_path
    pubkey = info.manifest.plugins['admin_user'].get('pubkey', None)
    if pubkey is None:
        return
    abs_pubkey = rel_path(info.manifest.path, pubkey)
    if not os.path.isfile(abs_pubkey):
        info.manifest.validation_error('Could not find public key at %s' % pubkey,
                                       ['plugins', 'admin_user', 'pubkey'])
    # ssh-keygen -l exits non-zero for files that are not valid public keys
    ret, _, stderr = log_call(['ssh-keygen', '-l', '-f', abs_pubkey])
    if ret != 0:
        info.manifest.validation_error('Invalid public key file at %s' % pubkey,
                                       ['plugins', 'admin_user', 'pubkey'])
def run(cls, info):
    """Validate the admin user's public key file, if one is configured."""
    from bootstrapvz.common.tools import log_call, rel_path
    pubkey = info.manifest.plugins['admin_user'].get('pubkey', None)
    if pubkey is not None:
        abs_pubkey = rel_path(info.manifest.path, pubkey)
        if not os.path.isfile(abs_pubkey):
            msg = 'Could not find public key at %s' % pubkey
            info.manifest.validation_error(msg, ['plugins', 'admin_user', 'pubkey'])
        # Bugfix: pass the command as an argument list — as the other call
        # sites do — rather than one string, which also avoids word-splitting
        # problems for paths containing spaces.
        ret, _, stderr = log_call(['ssh-keygen', '-l', '-f', abs_pubkey])
        if ret != 0:
            msg = 'Invalid public key file at %s' % pubkey
            info.manifest.validation_error(msg, ['plugins', 'admin_user', 'pubkey'])
def run(cls, info):
    """Queue the base packages plus the release/arch specific kernel."""
    from bootstrapvz.common.tools import config_get, rel_path
    base_packages = ('openssl', 'python-openssl', 'python-pyasn1', 'sudo', 'parted')
    for package in base_packages:
        info.packages.add(package)
    kernel_lookup = [info.manifest.release.codename, info.manifest.system['architecture']]
    info.packages.add(config_get(rel_path(__file__, 'packages-kernels.yml'), kernel_lookup))
def run(cls, info):
    """Copy each configured file/directory into the chroot and apply the
    requested permission/ownership modifications to the copied path."""
    from bootstrapvz.common.tools import rel_path
    for file_entry in info.manifest.plugins['file_copy']['files']:
        # note that we don't use os.path.join because it can't
        # handle absolute paths, which 'dst' most likely is.
        final_destination = os.path.normpath(
            "%s/%s" % (info.root, file_entry['dst']))
        src_path = rel_path(info.manifest.path, file_entry['src'])
        if os.path.isfile(src_path):
            shutil.copy(src_path, final_destination)
        else:
            shutil.copytree(src_path, final_destination)
        # Bugfix: when a file is copied into an already-existing directory,
        # the modifications must target the file inside that directory, not
        # the directory itself. (Keeps the chroot-relative path convention
        # that modify_path is called with here.)
        if os.path.isfile(src_path) and os.path.isdir(final_destination):
            dst = os.path.join(file_entry['dst'], os.path.basename(src_path))
        else:
            dst = file_entry['dst']
        modify_path(info, dst, file_entry)
def __init__(self, path=None, data=None):
    """Initializer: Given a path we load, validate and parse the manifest.
    To create the manifest from dynamic data instead of the contents of a file,
    provide a properly constructed dict as the data argument.

    :param str path: The path to the manifest (ignored, when `data' is provided)
    :param str data: The manifest data, if it is not None, it will be used instead of the contents of `path'
    :raises ManifestError: When neither `path' nor `data' is provided
    """
    if path is None and data is None:
        raise ManifestError('`path\' or `data\' must be provided')
    self.path = path
    # The metaschema is used to validate the manifest schemas themselves
    self.metaschema = load_data(rel_path(__file__, 'metaschema.json'))
    # Load -> resolve plugin/provider modules -> validate -> parse, in order
    self.load_data(data)
    self.load_modules()
    self.validate()
    self.parse()
def run(cls, info):
    """Register the bootstrapped volume as an AMI (boto3-style parameters)."""
    registration_params = {'Name': info._ec2['ami_name'],
                           'Description': info._ec2['ami_description']}
    registration_params['Architecture'] = {'i386': 'i386',
                                           'amd64': 'x86_64'}.get(info.manifest.system['architecture'])

    if info.manifest.volume['backing'] == 's3':
        # S3 backed AMIs register from the uploaded bundle manifest
        registration_params['ImageLocation'] = info._ec2['manifest_location']
    else:
        # EBS backed: root device name depends on the virtualization type
        root_dev_name = {'pvm': '/dev/sda',
                         'hvm': '/dev/xvda'}.get(info.manifest.provider['virtualization'])
        registration_params['RootDeviceName'] = root_dev_name

        block_device = [{'DeviceName': root_dev_name,
                         'Ebs': {
                             'SnapshotId': info._ec2['snapshot'],
                             'VolumeSize': info.volume.size.bytes.get_qty_in('GiB'),
                             'VolumeType': 'gp2',
                             'DeleteOnTermination': True}}]
        registration_params['BlockDeviceMappings'] = block_device

    if info.manifest.provider['virtualization'] == 'hvm':
        registration_params['VirtualizationType'] = 'hvm'
    else:
        registration_params['VirtualizationType'] = 'paravirtual'
        # Paravirtual AMIs boot via a region/arch specific AKI (pvgrub image)
        akis_path = rel_path(__file__, 'ami-akis.yml')
        from bootstrapvz.common.tools import config_get
        registration_params['kernel_id'] = config_get(akis_path,
                                                      [info._ec2['region'],
                                                       info.manifest.system['architecture']])

    if info.manifest.provider.get('enhanced_networking', None) == 'simple':
        registration_params['SriovNetSupport'] = 'simple'
        registration_params['EnaSupport'] = True

    info._ec2['image'] = info._ec2['connection'].register_image(**registration_params)

    # Setting up tags on the AMI
    if 'tags' in info.manifest.data:
        raw_tags = info.manifest.data['tags']
        # Tag values may interpolate manifest variables via str.format
        formatted_tags = {k: v.format(**info.manifest_vars) for k, v in raw_tags.items()}
        tags = [{'Key': k, 'Value': v} for k, v in formatted_tags.items()]
        info._ec2['connection'].create_tags(Resources=[info._ec2['image']['ImageId']], Tags=tags)
def run(cls, info):
    """Copy each configured file/directory into the chroot and apply the
    requested permission/ownership modifications to the copied path."""
    from bootstrapvz.common.tools import rel_path
    for file_entry in info.manifest.plugins['file_copy']['files']:
        # note that we don't use os.path.join because it can't
        # handle absolute paths, which 'dst' most likely is.
        final_destination = os.path.normpath("%s/%s" % (info.root, file_entry['dst']))
        src_path = rel_path(info.manifest.path, file_entry['src'])
        if os.path.isfile(src_path):
            shutil.copy(src_path, final_destination)
        else:
            shutil.copytree(src_path, final_destination)
        # When a file was copied into an existing directory, the modifications
        # must target the file inside that directory, not the directory itself
        if os.path.isfile(src_path) and os.path.isdir(final_destination):
            dst = os.path.join(final_destination, os.path.basename(src_path))
        else:
            dst = final_destination
        modify_path(info, dst, file_entry)
def run(cls, info):
    """Install the admin user's static SSH public key into authorized_keys."""
    if 'ec2-get-credentials' in info.initd['install']:
        # A static key conflicts with EC2's runtime key injection, so the
        # injection startup script is dropped.
        # NOTE(review): the concatenated message parts lack separating spaces.
        log.warn(
            'You are using a static public key for the admin account.'
            'This will conflict with the ec2 public key injection mechanism.'
            'The ec2-get-credentials startup script will therefore not be enabled.'
        )
        del info.initd['install']['ec2-get-credentials']

    # Get the stuff we need (username & public key)
    username = info.manifest.plugins['admin_user']['username']
    from bootstrapvz.common.tools import rel_path
    pubkey_path = rel_path(info.manifest.path, info.manifest.plugins['admin_user']['pubkey'])
    with open(pubkey_path) as pubkey_handle:
        pubkey = pubkey_handle.read()

    # paths
    from os.path import join
    ssh_dir_rel = join('home', username, '.ssh')
    auth_keys_rel = join(ssh_dir_rel, 'authorized_keys')
    ssh_dir_abs = join(info.root, ssh_dir_rel)
    auth_keys_abs = join(info.root, auth_keys_rel)

    # Create the ssh dir if nobody has created it yet
    if not os.path.exists(ssh_dir_abs):
        os.mkdir(ssh_dir_abs, 0o700)

    # Create (or append to) the authorized keys file (and chmod u=rw,go=)
    import stat
    with open(auth_keys_abs, 'a') as auth_keys_handle:
        auth_keys_handle.write(pubkey + '\n')
    os.chmod(auth_keys_abs, (stat.S_IRUSR | stat.S_IWUSR))

    # Set the owner of the authorized keys file
    # (must be through chroot, the host system doesn't know about the user)
    from bootstrapvz.common.tools import log_check_call
    log_check_call([
        'chroot', info.root, 'chown', '-R', (username + ':' + username),
        ssh_dir_rel
    ])
def validate_manifest(data, validator, error):
    """Validates the manifest using the base manifest

    :param dict data: The data of the manifest
    :param function validator: The function that validates the manifest given the data and a path
    :param function error: The function that raises an error when the validation fails
    """
    from bootstrapvz.common.tools import rel_path
    validator(data, rel_path(__file__, 'manifest-schema.yml'))

    from bootstrapvz.common.releases import get_release, squeeze
    release = get_release(data['system']['release'])

    if release < squeeze:
        error('Only Debian squeeze and later is supported', ['system', 'release'])

    # Check the bootloader/partitioning configuration.
    # Doing this via the schema is a pain and does not output a useful error message.
    if data['system']['bootloader'] == 'grub':
        if data['volume']['partitions']['type'] == 'none':
            error('Grub cannot boot from unpartitioned disks', ['system', 'bootloader'])
        if release == squeeze:
            error('Grub installation on squeeze is not supported', ['system', 'bootloader'])

    # Check the provided apt.conf(5) options
    if 'packages' in data:
        for name, val in data['packages'].get('apt.conf.d', {}).iteritems():
            from bootstrapvz.common.tools import log_call
            # Let apt-config parse the snippet; a non-zero exit means a syntax error
            status, _, _ = log_call(['apt-config', '-c=/dev/stdin', 'dump'],
                                    stdin=val + '\n')
            if status != 0:
                error('apt.conf(5) syntax error', ['packages', 'apt.conf.d', name])
def run(cls, info):
    """Queue the base image package set plus the release/arch specific kernel."""
    base_packages = [
        'acpi-support-base', 'busybox', 'ca-certificates', 'curl',
        'ethtool', 'gdisk', 'kpartx', 'isc-dhcp-client', 'lsb-release',
        'ntp', 'parted', 'python', 'openssh-client', 'openssh-server',
        'sudo', 'uuid-runtime',
    ]
    for package in base_packages:
        info.packages.add(package)
    kernel_packages_path = rel_path(__file__, 'packages-kernels.yml')
    info.packages.add(config_get(kernel_packages_path,
                                 [info.manifest.release.codename,
                                  info.manifest.system['architecture']]))
def run(cls, info):
    """Install the admin user's static SSH public key into authorized_keys."""
    if 'ec2-get-credentials' in info.initd['install']:
        # A static key conflicts with EC2's runtime key injection, so the
        # injection startup script is dropped.
        # NOTE(review): the concatenated message parts lack separating spaces.
        log.warn('You are using a static public key for the admin account.'
                 'This will conflict with the ec2 public key injection mechanism.'
                 'The ec2-get-credentials startup script will therefore not be enabled.')
        del info.initd['install']['ec2-get-credentials']

    # Get the stuff we need (username & public key)
    username = info.manifest.plugins['admin_user']['username']
    from bootstrapvz.common.tools import rel_path
    pubkey_path = rel_path(info.manifest.path, info.manifest.plugins['admin_user']['pubkey'])
    with open(pubkey_path) as pubkey_handle:
        pubkey = pubkey_handle.read()

    # paths
    from os.path import join
    ssh_dir_rel = join('home', username, '.ssh')
    auth_keys_rel = join(ssh_dir_rel, 'authorized_keys')
    ssh_dir_abs = join(info.root, ssh_dir_rel)
    auth_keys_abs = join(info.root, auth_keys_rel)

    # Create the ssh dir if nobody has created it yet
    if not os.path.exists(ssh_dir_abs):
        os.mkdir(ssh_dir_abs, 0700)

    # Create (or append to) the authorized keys file (and chmod u=rw,go=)
    import stat
    with open(auth_keys_abs, 'a') as auth_keys_handle:
        auth_keys_handle.write(pubkey + '\n')
    os.chmod(auth_keys_abs, (stat.S_IRUSR | stat.S_IWUSR))

    # Set the owner of the authorized keys file
    # (must be through chroot, the host system doesn't know about the user)
    from bootstrapvz.common.tools import log_check_call
    log_check_call(['chroot', info.root,
                    'chown', '-R', (username + ':' + username), ssh_dir_rel])
def run(cls, info):
    # Assemble the keyword arguments for the EC2 RegisterImage call.
    registration_params = {'name': info._ec2['ami_name'],
                           'description': info._ec2['ami_description']}
    # EC2 names 64-bit "x86_64" where Debian says "amd64".
    registration_params['architecture'] = {'i386': 'i386',
                                           'amd64': 'x86_64'}.get(info.manifest.system['architecture'])

    if info.manifest.volume['backing'] == 's3':
        # S3-backed (instance-store) AMIs are registered from the uploaded bundle manifest.
        registration_params['image_location'] = info._ec2['manifest_location']
    else:
        # EBS-backed AMIs are registered from a snapshot; the root device
        # name differs between paravirtual and HVM images.
        root_dev_name = {'pvm': '/dev/sda',
                         'hvm': '/dev/xvda'}.get(info.manifest.provider['virtualization'])
        registration_params['root_device_name'] = root_dev_name

        from boto.ec2.blockdevicemapping import BlockDeviceType
        from boto.ec2.blockdevicemapping import BlockDeviceMapping
        # Root volume comes from the snapshot taken earlier in the run and is
        # deleted when the instance terminates; gp2 is the volume type.
        block_device = BlockDeviceType(snapshot_id=info._ec2['snapshot'].id,
                                       delete_on_termination=True,
                                       size=info.volume.size.bytes.get_qty_in('GiB'),
                                       volume_type='gp2')
        registration_params['block_device_map'] = BlockDeviceMapping()
        registration_params['block_device_map'][root_dev_name] = block_device

    if info.manifest.provider['virtualization'] == 'hvm':
        registration_params['virtualization_type'] = 'hvm'
    else:
        # Paravirtual AMIs additionally need a region/arch specific AKI
        # (pvgrub kernel image), looked up from ami-akis.yml.
        registration_params['virtualization_type'] = 'paravirtual'
        akis_path = rel_path(__file__, 'ami-akis.yml')
        from bootstrapvz.common.tools import config_get
        registration_params['kernel_id'] = config_get(akis_path,
                                                      [info._ec2['region'],
                                                       info.manifest.system['architecture']])

    # SR-IOV ("simple") enhanced networking is recorded as an AMI attribute.
    if info.manifest.provider.get('enhanced_networking', None) == 'simple':
        registration_params['sriov_net_support'] = 'simple'

    info._ec2['image'] = info._ec2['connection'].register_image(**registration_params)

    # Setting up tags on the AMI
    if 'tags' in info.manifest.data:
        raw_tags = info.manifest.data['tags']
        # Tag values may interpolate manifest variables (str.format syntax).
        tags = {k: v.format(**info.manifest_vars) for k, v in raw_tags.items()}
        info._ec2['connection'].create_tags(info._ec2['image'], tags)
def run(cls, info):
    """Mount the Guest Additions ISO into the chroot and run its installer.

    The kernel version is derived from the Depends field of the queued
    kernel headers package, then substituted into the install wrapper script.
    """
    from bootstrapvz.common.tools import log_call, log_check_call
    # e.g. "Depends: linux-headers-3.16.0-4-amd64" -> "3.16.0-4-amd64"
    kernel_version = None
    for line in log_check_call(['chroot', info.root,
                                'apt-cache', 'show', info.kernel['headers_pkg']]):
        # BUGFIX: use partition() instead of two-target split(':') — field
        # values can contain colons themselves (version epochs, URLs), which
        # made the original unpacking raise ValueError.
        key, _, value = line.partition(':')
        if key.strip() == 'Depends':
            kernel_version = value.strip().split('linux-headers-')[-1]
            break
    if kernel_version is None:
        # BUGFIX: previously this fell through to an obscure NameError below.
        raise Exception('Unable to determine the kernel version from the '
                        'dependencies of ' + info.kernel['headers_pkg'])

    guest_additions_path = rel_path(info.manifest.path,
                                    info.manifest.provider['guest_additions'])
    mount_dir = 'mnt/guest_additions'
    mount_path = os.path.join(info.root, mount_dir)
    os.mkdir(mount_path)
    root = info.volume.partition_map.root
    # Loop-mount the ISO so the installer is reachable inside the chroot.
    root.add_mount(guest_additions_path, mount_path, ['-o', 'loop'])

    install_script = os.path.join('/', mount_dir, 'VBoxLinuxAdditions.run')
    install_wrapper_name = 'install_guest_additions.sh'
    # BUGFIX: close the template file; the original leaked the handle.
    with open(os.path.join(assets, install_wrapper_name)) as template_handle:
        install_wrapper = template_handle.read() \
            .replace("KERNEL_VERSION", kernel_version) \
            .replace("KERNEL_ARCH", info.kernel['arch']) \
            .replace("INSTALL_SCRIPT", install_script)
    install_wrapper_path = os.path.join(info.root, install_wrapper_name)
    with open(install_wrapper_path, 'w') as f:
        f.write(install_wrapper + '\n')

    # Don't check the return code of the scripts here, because 1 not necessarily means they have failed
    log_call(['chroot', info.root, 'bash', '/' + install_wrapper_name])
    # VBoxService process could be running, as it is not affected by DisableDaemonAutostart
    log_call(['chroot', info.root, 'service', 'vboxadd-service', 'stop'])

    root.remove_mount(mount_path)
    os.rmdir(mount_path)
    os.remove(install_wrapper_path)
def run(cls, info):
    """Register the common EC2 packages and the matching kernel package."""
    # Queue each required base package in turn.
    for name in ['acpi-support-base', 'busybox', 'ca-certificates', 'curl',
                 'ethtool', 'gdisk', 'kpartx', 'isc-dhcp-client',
                 'lsb-release', 'ntp', 'parted', 'python', 'openssh-client',
                 'openssh-server', 'sudo', 'uuid-runtime']:
        info.packages.add(name)
    # Resolve the kernel package via the codename/architecture lookup table.
    packages_path = rel_path(__file__, 'packages-kernels.yml')
    selector = [info.manifest.release.codename,
                info.manifest.system['architecture']]
    info.packages.add(config_get(packages_path, selector))
def validate_manifest(data, validator, error):
    """Validate the manifest data against this plugin's schema file."""
    from bootstrapvz.common.tools import rel_path
    schema = rel_path(__file__, 'manifest-schema.yml')
    validator(data, schema)
from bootstrapvz.common.tools import rel_path

# Directory holding this plugin's bundled asset files.
assets = rel_path(__file__, 'assets')


def validate_manifest(data, validator, error):
    # Validate the manifest against the schema shipped next to this module.
    from bootstrapvz.common.tools import rel_path
    validator(data, rel_path(__file__, 'manifest-schema.yml'))


def resolve_tasks(taskset, manifest):
    # Assemble the task set for the cloud_init plugin based on the
    # target Debian release and the plugin options in the manifest.
    import tasks
    import bootstrapvz.providers.ec2.tasks.initd as initd_ec2
    from bootstrapvz.common.tasks import apt
    from bootstrapvz.common.tasks import initd
    from bootstrapvz.common.tasks import ssh
    from bootstrapvz.common.releases import wheezy
    from bootstrapvz.common.releases import jessie
    # cloud-init is only available via backports on wheezy.
    if manifest.release == wheezy:
        taskset.add(apt.AddBackports)
    if manifest.release >= jessie:
        taskset.add(tasks.SetCloudInitMountOptions)
    taskset.update([tasks.SetMetadataSource,
                    tasks.AddCloudInitPackages,
                    ])
    options = manifest.plugins['cloud_init']
    # NOTE(review): the body of this branch lies beyond this chunk.
    if 'username' in options:
def validate_manifest(data, validator, error):
    """Run schema validation over this plugin's manifest section."""
    # The schema file lives alongside this module.
    schema_file = rel_path(__file__, 'manifest-schema.yml')
    validator(data, schema_file)
def run(cls, info):
    """Add the kernel package matching the target release and architecture."""
    lookup_keys = [info.manifest.release.codename,
                   info.manifest.system['architecture']]
    packages_file = rel_path(__file__, 'packages-kernels.yml')
    info.packages.add(config_get(packages_file, lookup_keys))
from bootstrapvz.base import Task
from bootstrapvz.common import phases
from bootstrapvz.common.tasks import apt
from bootstrapvz.common.tasks import initd
from bootstrapvz.common.tools import log_check_call
from bootstrapvz.common.tools import rel_path
from bootstrapvz.common.tools import sed_i
import os
import shutil

# Directory holding this plugin's bundled asset files.
ASSETS_DIR = rel_path(__file__, 'assets')


class InstallGrowpart(Task):
    # Queues the growpart tooling; ordered after AddBackports so the
    # backports target is available when needed.
    description = 'Adding necessary packages for growpart.'
    phase = phases.preparation
    predecessors = [apt.AddBackports]

    @classmethod
    def run(cls, info):
        # Use the cloud-guest-utils package from jessie-backports which has
        # several significant bug fixes from the mainline growpart script.
        target = None
        from bootstrapvz.common.releases import jessie
        if info.manifest.release == jessie:
            # Literal manifest variable; resolved by the packages machinery.
            target = '{system.release}-backports'
        info.packages.add('cloud-guest-utils', target)


class InstallExpandRootScripts(Task):
    # NOTE(review): the rest of this class lies beyond this chunk.
    description = 'Installing scripts for expand-root.'
from bootstrapvz.base import Task
from bootstrapvz.common import phases
from bootstrapvz.common.tasks.packages import InstallPackages
from bootstrapvz.common.exceptions import TaskError
from bootstrapvz.common.tools import rel_path
import os

# Assets shared with the parent directory of this plugin.
assets = rel_path(__file__, '../assets')


class CheckGuestAdditionsPath(Task):
    # Fails fast during validation when the configured ISO is missing.
    description = 'Checking whether the VirtualBox Guest Additions image exists'
    phase = phases.validation

    @classmethod
    def run(cls, info):
        # NOTE(review): this checks the manifest value as-is, while the
        # installer task resolves it relative to the manifest path via
        # rel_path — a relative path may pass there but fail here (or
        # vice versa). Verify against the installer task.
        guest_additions_path = info.manifest.provider['guest_additions']
        if not os.path.exists(guest_additions_path):
            msg = 'The file {file} does not exist.'.format(file=guest_additions_path)
            raise TaskError(msg)


class AddGuestAdditionsPackages(Task):
    # Build dependencies needed to compile the Guest Additions kernel modules.
    description = 'Adding packages to support Guest Additions installation'
    phase = phases.package_installation
    successors = [InstallPackages]

    @classmethod
    def run(cls, info):
        info.packages.add('bzip2')
        info.packages.add('build-essential')
def validate_manifest(data, validator, error):
    """Check the plugin's manifest section against its schema."""
    validator(data, rel_path(__file__, "manifest-schema.yml"))
import os
from bootstrapvz.base import Task
from bootstrapvz.common import phases
from bootstrapvz.common.tasks import apt
from bootstrapvz.common.exceptions import TaskError
from bootstrapvz.common.releases import jessie, wheezy, stretch, buster
from bootstrapvz.common.tools import sed_i, log_check_call, rel_path

# Per-release Puppetlabs PC1 GPG keyring asset directories.
ASSETS_DIR_BUSTER = rel_path(__file__, 'assets/gpg-keyrings-PC1/buster')
ASSETS_DIR_STRETCH = rel_path(__file__, 'assets/gpg-keyrings-PC1/stretch')
ASSETS_DIR_JESSIE = rel_path(__file__, 'assets/gpg-keyrings-PC1/jessie')
ASSETS_DIR_WHEEZY = rel_path(__file__, 'assets/gpg-keyrings-PC1/wheezy')


class CheckRequestedDebianRelease(Task):
    # Validation task: reject releases the Puppetlabs APT repo doesn't cover.
    description = 'Checking whether there is a release available for {info.manifest.release}'
    phase = phases.validation

    @classmethod
    def run(cls, info):
        if info.manifest.release not in (jessie, wheezy, stretch, buster):
            # NOTE(review): the placeholder is never interpolated — the raw
            # '{info.manifest.release}' text ends up in the error. Likely
            # needs .format(info=info); confirm before changing.
            msg = 'Debian {info.manifest.release} is not (yet) available in the Puppetlabs.com APT repository.'
            raise TaskError(msg)


class CheckAssetsPath(Task):
    # NOTE(review): the method following the decorator lies beyond this chunk.
    description = 'Checking whether the assets path exist'
    phase = phases.validation
    predecessors = [CheckRequestedDebianRelease]

    @classmethod
import os
from bootstrapvz.base import Task
from bootstrapvz.common import phases
from bootstrapvz.common.tasks import apt
from bootstrapvz.common.exceptions import TaskError
from bootstrapvz.common.releases import jessie, wheezy, stretch
from bootstrapvz.common.tools import sed_i, log_check_call, rel_path
# NOTE(review): __builtin__ is Python-2-only and 'str' is a builtin anyway;
# this import looks auto-added (IDE artifact) and is likely removable.
from __builtin__ import str

# Per-release Puppetlabs PC1 GPG keyring asset directories.
ASSETS_DIR_STRETCH = rel_path(__file__, 'assets/gpg-keyrings-PC1/stretch')
ASSETS_DIR_JESSIE = rel_path(__file__, 'assets/gpg-keyrings-PC1/jessie')
ASSETS_DIR_WHEEZY = rel_path(__file__, 'assets/gpg-keyrings-PC1/wheezy')


class CheckRequestedDebianRelease(Task):
    # Validation task: reject releases the Puppetlabs APT repo doesn't cover.
    description = 'Checking whether there is a release available for {info.manifest.release}'
    phase = phases.validation

    @classmethod
    def run(cls, info):
        # Redundant local import: TaskError is already imported at module level.
        from bootstrapvz.common.exceptions import TaskError
        if info.manifest.release not in (jessie, wheezy, stretch):
            # NOTE(review): placeholder is never interpolated — the raw
            # '{info.manifest.release}' text ends up in the error message.
            msg = 'Debian {info.manifest.release} is not (yet) available in the Puppetlabs.com APT repository.'
            raise TaskError(msg)


class CheckAssetsPath(Task):
    # NOTE(review): the rest of this class lies beyond this chunk.
    description = 'Checking whether the assets path exist'
    phase = phases.validation
    predecessors = [CheckRequestedDebianRelease]