def run(cls, info):
    """Download, unpack and DKMS-register the ixgbevf driver in the chroot.

    Fetches the driver tarball into the image's /tmp, extracts it under
    /usr/src, writes a dkms.conf and runs the add/build/install DKMS tasks.
    """
    version = '2.15.3'
    drivers_url = 'http://downloads.sourceforge.net/project/e1000/ixgbevf stable/%s/ixgbevf-%s.tar.gz' % (version, version)
    archive = os.path.join(info.root, 'tmp', 'ixgbevf-%s.tar.gz' % (version))
    module_path = os.path.join(info.root, 'usr', 'src', 'ixgbevf-%s' % (version))
    import urllib
    urllib.urlretrieve(drivers_url, archive)
    from bootstrapvz.common.tools import log_check_call
    log_check_call(['tar', '--ungzip',
                    '--extract',
                    '--file', archive,
                    '--directory', os.path.join(info.root, 'usr', 'src')])
    with open(os.path.join(module_path, 'dkms.conf'), 'w') as dkms_conf:
        dkms_conf.write("""PACKAGE_NAME="ixgbevf"
PACKAGE_VERSION="%s"
CLEAN="cd src/; make clean"
MAKE="cd src/; make BUILD_KERNEL=${kernelver}"
BUILT_MODULE_LOCATION[0]="src/"
BUILT_MODULE_NAME[0]="ixgbevf"
DEST_MODULE_LOCATION[0]="/updates"
DEST_MODULE_NAME[0]="ixgbevf"
AUTOINSTALL="yes"
""" % (version))
    for task in ['add', 'build', 'install']:
        # Invoke DKMS task using specified kernel module (-m) and version (-v).
        # Fix: pass the image's kernel (-k) like the sibling driver tasks do,
        # otherwise DKMS builds against the *host's* running kernel.
        log_check_call(['chroot', info.root,
                        'dkms', task, '-m', 'ixgbevf', '-v', version,
                        '-k', info.kernel_version])
def _before_link_dm_node(self, e):
    """Links the volume using the device mapper
    This allows us to create a 'window' into the volume that acts like a volume in itself.
    Mainly it is used to fool grub into thinking that it is working with a real volume,
    rather than a loopback device or a network block device.

    :param _e_obj e: Event object containing arguments to create()
        Keyword arguments to link_dm_node() are:
    :param int logical_start_sector: The sector the volume should start at in the new volume
    :param int start_sector: The offset at which the volume should begin to be mapped in the new volume
    :param int sectors: The number of sectors that should be mapped

    Read more at: http://manpages.debian.org/cgi-bin/man.cgi?query=dmsetup&apropos=0&sektion=0&manpath=Debian+7.0+wheezy&format=html&locale=en

    :raises VolumeError: When a free block device cannot be found.
    """
    import os.path
    from bootstrapvz.common.fs import get_partitions
    # Fetch information from /proc/partitions
    proc_partitions = get_partitions()
    device_name = os.path.basename(self.device_path)
    device_partition = proc_partitions[device_name]

    # The sector the volume should start at in the new volume
    logical_start_sector = getattr(e, 'logical_start_sector', 0)

    # The offset at which the volume should begin to be mapped in the new volume
    start_sector = getattr(e, 'start_sector', 0)

    # The number of sectors that should be mapped
    # (defaults to the remainder of the volume after start_sector;
    # int(self.size) is presumably the volume size in sectors — TODO confirm)
    sectors = getattr(e, 'sectors', int(self.size) - start_sector)

    # This is the table we send to dmsetup, so that it may create a device mapping for us.
    # Format: <logical start> <num sectors> linear <major>:<minor> <offset>
    table = ('{log_start_sec} {sectors} linear {major}:{minor} {start_sec}'
             .format(log_start_sec=logical_start_sector,
                     sectors=sectors,
                     major=device_partition['major'],
                     minor=device_partition['minor'],
                     start_sec=start_sector))
    import string
    import os.path
    # Figure out the device letter and path: pick the first /dev/mapper/vd[a-z]
    # name that does not exist yet
    for letter in string.ascii_lowercase:
        dev_name = 'vd' + letter
        dev_path = os.path.join('/dev/mapper', dev_name)
        if not os.path.exists(dev_path):
            self.dm_node_name = dev_name
            self.dm_node_path = dev_path
            break
    # If the loop above never hit a free name, dm_node_name was never set
    if not hasattr(self, 'dm_node_name'):
        raise VolumeError('Unable to find a free block device path for mounting the bootstrap volume')

    # Create the device mapping (the table is fed to dmsetup on stdin)
    log_check_call(['dmsetup', 'create', self.dm_node_name], table)
    # Update the device_path but remember the old one for when we unlink the volume again
    self.unlinked_device_path = self.device_path
    self.device_path = self.dm_node_path
def run(cls, info):
    """Assemble the Vagrant box: Vagrantfile, metadata, disk symlink and
    OVF descriptor, then tar everything into the final .box archive."""
    folder = info._vagrant['folder']
    vagrantfile = os.path.join(folder, 'Vagrantfile')
    shutil.copy(os.path.join(assets, 'Vagrantfile'), vagrantfile)

    # Inject a random MAC address into the Vagrantfile template
    import random
    mac_address = '080027{mac:06X}'.format(mac=random.randrange(16 ** 6))
    from bootstrapvz.common.tools import sed_i
    sed_i(vagrantfile, '\\[MAC_ADDRESS\\]', mac_address)

    shutil.copy(os.path.join(assets, 'metadata.json'),
                os.path.join(folder, 'metadata.json'))

    from bootstrapvz.common.tools import log_check_call
    disk_name = 'box-disk1.' + info.volume.extension
    # Symlink the image; tar below dereferences it into the archive
    log_check_call(['ln', '-s', info.volume.image_path,
                    os.path.join(folder, disk_name)])

    cls.write_ovf(info, os.path.join(folder, 'box.ovf'), mac_address, disk_name)

    contents = os.listdir(folder)
    log_check_call(['tar', '--create', '--gzip', '--dereference',
                    '--file', info._vagrant['box_path'],
                    '--directory', folder] + contents)
    import logging
    logging.getLogger(__name__).info('The vagrant box has been placed at ' + info._vagrant['box_path'])
def run(cls, info):
    """Run the configured ansible playbook against the chroot.

    Writes a temporary inventory that targets the chroot via the
    'chroot' connection plugin, assembles the ansible-playbook command
    from the manifest options, runs it and removes the inventory.
    """
    opts = info.manifest.plugins['ansible']
    # Extract playbook and directory
    playbook = rel_path(info.manifest.path, opts['playbook'])
    # build the inventory file
    inventory = os.path.join(info.root, 'tmp/bootstrap-inventory')
    host_line = '{} ansible_connection=chroot'.format(info.root)
    with open(inventory, 'w') as handle:
        if 'groups' in opts:
            # List the chroot host under every requested group
            handle.write(''.join('[{}]\n{}\n'.format(group, host_line)
                                 for group in opts['groups']))
        else:
            handle.write(host_line)
    # build the ansible command
    cmd = ['ansible-playbook', '-i', inventory, playbook]
    if 'extra_vars' in opts:
        cmd.extend(['--extra-vars', json.dumps(opts['extra_vars'])])
    if 'tags' in opts:
        cmd.extend(['--tags', ','.join(opts['tags'])])
    if 'skip_tags' in opts:
        cmd.extend(['--skip-tags', ','.join(opts['skip_tags'])])
    if 'opt_flags' in opts:
        # Should probably do proper validation on these, but I don't think it should be used very often.
        cmd.extend(opts['opt_flags'])
    # Run and remove the inventory file
    log_check_call(cmd)
    os.remove(inventory)
def _before_create(self, e):
    """Create the partition on the volume via parted's mkpart."""
    from bootstrapvz.common.tools import log_check_call
    # parted's fs-type can be one of "fat16", "fat32", "ext2", "HFS",
    # "linux-swap", "NTFS", "reiserfs", or "ufs" — only a placeholder is
    # needed here; start/end are Bytes objects coerced into strings.
    fs_type = 'linux-swap' if self.filesystem == 'swap' else 'ext2'
    start = str(self.get_start() + self.pad_start)
    end = str(self.get_end() - self.pad_end)
    # Create the partition
    log_check_call(['parted', '--script', '--align', 'none',
                    e.volume.device_path, '--',
                    'mkpart primary {fs_type} {start} {end}'.format(fs_type=fs_type,
                                                                    start=start,
                                                                    end=end)])
    # Set any flags on the partition
    for flag in self.flags:
        log_check_call(['parted', '--script', e.volume.device_path, '--',
                        'set {idx} {flag} on'.format(idx=str(self.get_index()),
                                                     flag=flag)])
def run(cls, info):
    """Execute the manifest's ansible playbook against the chroot.

    A throwaway inventory file exposes the chroot through ansible's
    'chroot' connection; manifest options are translated into
    ansible-playbook flags before running.
    """
    from bootstrapvz.common.tools import log_check_call
    plugin_conf = info.manifest.plugins['ansible']
    playbook = rel_path(info.manifest.path, plugin_conf['playbook'])

    inventory = os.path.join(info.root, 'tmp/bootstrap-inventory')
    conn = '{} ansible_connection=chroot'.format(info.root)
    lines = []
    if 'groups' in plugin_conf:
        # One section per group, each pointing at the same chroot host
        for group in plugin_conf['groups']:
            lines.append('[{}]\n{}\n'.format(group, conn))
    else:
        lines.append(conn)
    with open(inventory, 'w') as handle:
        handle.write(''.join(lines))

    cmd = ['ansible-playbook', '-i', inventory, playbook]
    if 'extra_vars' in plugin_conf:
        cmd.extend(['--extra-vars', json.dumps(plugin_conf['extra_vars'])])
    if 'tags' in plugin_conf:
        cmd.extend(['--tags', ','.join(plugin_conf['tags'])])
    if 'skip_tags' in plugin_conf:
        cmd.extend(['--skip-tags', ','.join(plugin_conf['skip_tags'])])
    if 'opt_flags' in plugin_conf:
        # Should probably do proper validation on these, but I don't think it should be used very often.
        cmd.extend(plugin_conf['opt_flags'])

    log_check_call(cmd)
    os.remove(inventory)
def run(cls, info):
    """Create each directory requested by the file_copy plugin inside the
    chroot, then apply the entry's ownership/permission settings."""
    from bootstrapvz.common.tools import log_check_call
    for dir_entry in info.manifest.plugins['file_copy']['mkdirs']:
        log_check_call(['chroot', info.root,
                        'mkdir', '-p', dir_entry['dir']])
        modify_path(info, dir_entry['dir'], dir_entry)
def run(cls, info):
    """Build the final Vagrant .box archive from the staging folder."""
    box_folder = info._vagrant['folder']
    vagrantfile = os.path.join(box_folder, 'Vagrantfile')
    shutil.copy(os.path.join(assets, 'Vagrantfile'), vagrantfile)

    # Randomize the NIC MAC inside VirtualBox's 08:00:27 OUI
    import random
    mac_address = '080027{mac:06X}'.format(mac=random.randrange(16**6))
    from bootstrapvz.common.tools import sed_i
    sed_i(vagrantfile, '\\[MAC_ADDRESS\\]', mac_address)

    shutil.copy(os.path.join(assets, 'metadata.json'),
                os.path.join(box_folder, 'metadata.json'))

    from bootstrapvz.common.tools import log_check_call
    disk_name = 'box-disk1.' + info.volume.extension
    # The symlink is dereferenced by tar, pulling the real image into the box
    log_check_call(['ln', '-s', info.volume.image_path,
                    os.path.join(box_folder, disk_name)])

    cls.write_ovf(info, os.path.join(box_folder, 'box.ovf'),
                  mac_address, disk_name)

    box_files = os.listdir(box_folder)
    log_check_call(['tar', '--create', '--gzip', '--dereference',
                    '--file', info._vagrant['box_path'],
                    '--directory', box_folder] + box_files)

    import logging
    logging.getLogger(__name__).info(
        'The vagrant box has been placed at ' + info._vagrant['box_path'])
def _before_create(self, e):
    """Create the partition, then assign its GPT name.

    Partition names only exist on gpt labels; on msdos the equivalent
    field is the part-type (primary, extended, logical).
    """
    super(GPTPartition, self)._before_create(e)
    log_check_call(['parted', '--script', e.volume.device_path, '--',
                    'name {idx} {name}'.format(idx=self.get_index(),
                                               name=self.name)])
def run(cls, info):
    """Clone the Amazon ENA driver sources into the chroot and register
    them with DKMS (add, build, install) for the image's kernel.
    """
    version = "1.0.0"
    drivers_url = "https://github.com/amzn/amzn-drivers"
    module_path = os.path.join(info.root, "usr", "src", "amzn-drivers-%s" % (version))

    from bootstrapvz.common.tools import log_check_call
    log_check_call(["git", "clone", drivers_url, module_path])

    # Fix: BUILT_MODULE_LOCATION is an array directive in dkms.conf (cf.
    # dkms(8)); index it like every other directive in this file.
    with open(os.path.join(module_path, "dkms.conf"), "w") as dkms_conf:
        dkms_conf.write(
            """PACKAGE_NAME="ena"
PACKAGE_VERSION="%s"
CLEAN="make -C kernel/linux/ena clean"
MAKE="make -C kernel/linux/ena/ BUILD_KERNEL=${kernelver}"
BUILT_MODULE_NAME[0]="ena"
BUILT_MODULE_LOCATION[0]="kernel/linux/ena"
DEST_MODULE_LOCATION[0]="/updates"
DEST_MODULE_NAME[0]="ena"
AUTOINSTALL="yes"
"""
            % (version)
        )

    for task in ["add", "build", "install"]:
        # Invoke DKMS task using specified kernel module (-m) and version (-v)
        log_check_call(
            ["chroot", info.root, "dkms", task, "-m", "amzn-drivers", "-v", version, "-k", info.kernel_version]
        )
def run(cls, info):
    """Pack the bootstrapped image into a sparse GCE tarball.

    Derives an image name of the form
    <distribution>-<release>-<manifest release>-v<YYYYMMDD>, sanitizes it
    for GCE, records it in info._gce and creates the tar.gz.
    """
    import datetime
    filename = (info.manifest.name.format(**info.manifest_vars) +
                "." + info.volume.extension)
    name_suffix = datetime.datetime.today().strftime("%Y%m%d")
    image_name = "{lsb_distribution}-{lsb_release}-{release}-v{name_suffix}".format(
        lsb_distribution=info._gce["lsb_distribution"],
        lsb_release=info._gce["lsb_release"],
        release=info.manifest.system["release"],
        name_suffix=name_suffix)
    # ensure that we do not use disallowed characters in image name
    image_name = image_name.lower().replace(".", "-")
    info._gce["image_name"] = image_name

    tarball_name = image_name + ".tar.gz"
    workspace = info.manifest.bootstrapper["workspace"]
    tarball_path = os.path.join(workspace, tarball_name)
    info._gce["tarball_name"] = tarball_name
    info._gce["tarball_path"] = tarball_path
    # --sparse keeps holes in the raw image from inflating the tarball
    log_check_call(["tar", "--sparse", "-C", workspace,
                    "-caf", tarball_path, filename])
def run(cls, info):
    """Shrink the image by clearing the apt cache and deleting all
    downloaded package list files."""
    log_check_call(['chroot', info.root, 'apt-get', 'clean'])
    lists = os.path.join(info.root, 'var/lib/apt/lists')
    for entry in os.listdir(lists):
        path = os.path.join(lists, entry)
        # Only files are removed; the 'partial' subdirectory stays
        if os.path.isfile(path):
            os.remove(path)
def run(cls, info):
    """Clone the ENA driver into the chroot and build/install it via DKMS
    against the image's kernel."""
    version = '1.0.0'
    drivers_url = 'https://github.com/amzn/amzn-drivers'
    module_path = os.path.join(info.root, 'usr', 'src', 'amzn-drivers-%s' % (version))
    from bootstrapvz.common.tools import log_check_call
    log_check_call(['git', 'clone', drivers_url, module_path])
    # Fix: index BUILT_MODULE_LOCATION like the other dkms.conf array
    # directives (dkms(8) documents it as BUILT_MODULE_LOCATION[#]).
    with open(os.path.join(module_path, 'dkms.conf'), 'w') as dkms_conf:
        dkms_conf.write("""PACKAGE_NAME="ena"
PACKAGE_VERSION="%s"
CLEAN="make -C kernel/linux/ena clean"
MAKE="make -C kernel/linux/ena/ BUILD_KERNEL=${kernelver}"
BUILT_MODULE_NAME[0]="ena"
BUILT_MODULE_LOCATION[0]="kernel/linux/ena"
DEST_MODULE_LOCATION[0]="/updates"
DEST_MODULE_NAME[0]="ena"
AUTOINSTALL="yes"
""" % (version))
    for task in ['add', 'build', 'install']:
        # Invoke DKMS task using specified kernel module (-m) and version (-v)
        log_check_call([
            'chroot', info.root,
            'dkms', task,
            '-m', 'amzn-drivers',
            '-v', version,
            '-k', info.kernel_version
        ])
def run(cls, info):
    """Fetch the Amazon ENA driver sources and register them with DKMS
    (add/build/install) for the image's kernel."""
    version = '1.0.0'
    drivers_url = 'https://github.com/amzn/amzn-drivers'
    module_path = os.path.join(info.root, 'usr', 'src', 'amzn-drivers-%s' % (version))
    from bootstrapvz.common.tools import log_check_call
    log_check_call(['git', 'clone', drivers_url, module_path])
    # Fix: BUILT_MODULE_LOCATION is an array directive per dkms(8);
    # add the [0] index used by every other directive here.
    with open(os.path.join(module_path, 'dkms.conf'), 'w') as dkms_conf:
        dkms_conf.write("""PACKAGE_NAME="ena"
PACKAGE_VERSION="%s"
CLEAN="make -C kernel/linux/ena clean"
MAKE="make -C kernel/linux/ena/ BUILD_KERNEL=${kernelver}"
BUILT_MODULE_NAME[0]="ena"
BUILT_MODULE_LOCATION[0]="kernel/linux/ena"
DEST_MODULE_LOCATION[0]="/updates"
DEST_MODULE_NAME[0]="ena"
AUTOINSTALL="yes"
""" % (version))
    for task in ['add', 'build', 'install']:
        # Invoke DKMS task using specified kernel module (-m) and version (-v)
        log_check_call(['chroot', info.root,
                        'dkms', task,
                        '-m', 'amzn-drivers',
                        '-v', version,
                        '-k', info.kernel_version])
def run(cls, info):
    """Create the sparse GCE image tarball and record its name/path.

    The image name is <distribution>-<release>-<manifest release>-vYYYYMMDD,
    lowercased with dots replaced, to satisfy GCE naming rules.
    """
    import datetime
    base_name = info.manifest.image['name'].format(**info.manifest_vars)
    filename = base_name + '.' + info.volume.extension
    name_suffix = datetime.datetime.today().strftime('%Y%m%d')
    raw_name = '{lsb_distribution}-{lsb_release}-{release}-v{name_suffix}'.format(
        lsb_distribution=info._gce['lsb_distribution'],
        lsb_release=info._gce['lsb_release'],
        release=info.manifest.system['release'],
        name_suffix=name_suffix)
    # ensure that we do not use disallowed characters in image name
    info._gce['image_name'] = raw_name.lower().replace(".", "-")

    tarball_name = info._gce['image_name'] + '.tar.gz'
    workspace = info.manifest.bootstrapper['workspace']
    tarball_path = os.path.join(workspace, tarball_name)
    info._gce['tarball_name'] = tarball_name
    info._gce['tarball_path'] = tarball_path
    log_check_call(['tar', '--sparse', '-C', workspace,
                    '-caf', tarball_path, filename])
def run(cls, info):
    """Patch the initramfs udev init-top script inside the image.

    c.f. http://anonscm.debian.org/cgit/pkg-systemd/systemd.git/commit/?id=61e055638cea
    """
    from bootstrapvz.common.tools import log_check_call
    from . import assets
    udev_dir = os.path.join(info.root, 'usr/share/initramfs-tools/scripts/init-top')
    # The diff is streamed in on stdin; -p6 strips its path prefix and
    # -d applies it relative to the init-top directory
    with open(os.path.join(assets, 'udev.diff')) as diff_file:
        log_check_call(['patch', '--no-backup-if-mismatch',
                        '-p6', '-d' + udev_dir],
                       stdin=diff_file)
def _before_create(self, e):
    """Create this partition on the volume with parted."""
    from bootstrapvz.common.tools import log_check_call
    # parted fs-type can be one of "fat16", "fat32", "ext2", "HFS",
    # "linux-swap", "NTFS", "reiserfs", or "ufs". The real filesystem is
    # formatted later; start/end are Bytes objects coerced into strings.
    if self.filesystem == 'swap':
        fs_type = 'linux-swap'
    else:
        fs_type = 'ext2'
    mkpart = 'mkpart primary {fs_type} {start} {end}'.format(
        fs_type=fs_type,
        start=str(self.get_start() + self.pad_start),
        end=str(self.get_end() - self.pad_end))
    log_check_call(['parted', '--script', '--align', 'none',
                    e.volume.device_path, '--', mkpart])
    # Apply each requested flag to the freshly created partition
    for flag in self.flags:
        set_command = 'set {idx} {flag} on'.format(idx=str(self.get_index()),
                                                   flag=flag)
        log_check_call(['parted', '--script', e.volume.device_path, '--',
                        set_command])
def run(cls, info):
    """Install a static public key for the admin user inside the image.

    Disables the ec2-get-credentials startup script (which would inject
    its own key), appends the configured public key to the user's
    authorized_keys and fixes ownership through chroot.
    """
    if 'ec2-get-credentials' in info.initd['install']:
        # Fix: the concatenated literals lacked separating spaces,
        # producing a run-together log message.
        log.warn('You are using a static public key for the admin account. '
                 'This will conflict with the ec2 public key injection mechanism. '
                 'The ec2-get-credentials startup script will therefore not be enabled.')
        del info.initd['install']['ec2-get-credentials']
    # Get the stuff we need (username & public key)
    username = info.manifest.plugins['admin_user']['username']
    with open(info.manifest.plugins['admin_user']['pubkey']) as pubkey_handle:
        pubkey = pubkey_handle.read()

    # Image-relative paths and their host-side (chroot-prefixed) versions
    ssh_dir_rel = os.path.join('home', username, '.ssh')
    auth_keys_rel = os.path.join(ssh_dir_rel, 'authorized_keys')
    ssh_dir_abs = os.path.join(info.root, ssh_dir_rel)
    auth_keys_abs = os.path.join(info.root, auth_keys_rel)

    # Fix: create the .ssh dir *inside the chroot*; previously it was
    # created at /home/<user>/.ssh on the host, so opening the
    # authorized_keys file below could fail.
    if not os.path.exists(ssh_dir_abs):
        os.mkdir(ssh_dir_abs, 0o700)

    # Create (or append to) the authorized keys file (and chmod u=rw,go=)
    import stat
    with open(auth_keys_abs, 'a') as auth_keys_handle:
        auth_keys_handle.write(pubkey + '\n')
    os.chmod(auth_keys_abs, (stat.S_IRUSR | stat.S_IWUSR))

    # Set the owner of the ssh dir and keys file
    # (must be through chroot, the host system doesn't know about the user)
    from bootstrapvz.common.tools import log_check_call
    log_check_call(['chroot', info.root,
                    'chown', '-R', (username + ':' + username), ssh_dir_rel])
def run(cls, info):
    """Download ixgbevf 3.2.2, unpack it under /usr/src in the chroot and
    run the DKMS add/build/install cycle for the image's kernel."""
    version = '3.2.2'
    drivers_url = 'https://downloadmirror.intel.com/26561/eng/ixgbevf-3.2.2.tar.gz'
    # Sadly the first number in the URL changes:
    # 2.16.1 => https://downloadmirror.intel.com/25464/eng/ixgbevf-2.16.1.tar.gz
    archive = os.path.join(info.root, 'tmp', 'ixgbevf-%s.tar.gz' % (version))
    module_path = os.path.join(info.root, 'usr', 'src', 'ixgbevf-%s' % (version))

    import urllib
    urllib.urlretrieve(drivers_url, archive)

    from bootstrapvz.common.tools import log_check_call
    log_check_call(['tar', '--ungzip', '--extract',
                    '--file', archive,
                    '--directory', os.path.join(info.root, 'usr', 'src')])

    # NOTE(review): the CLEAN sed below prepends "EXTRA_CFLAGS := -fno-pie"
    # to the Makefile's first line *without* a trailing newline, merging it
    # with that line's existing content — confirm this is intentional.
    with open(os.path.join(module_path, 'dkms.conf'), 'w') as dkms_conf:
        dkms_conf.write("""PACKAGE_NAME="ixgbevf"
PACKAGE_VERSION="%s"
CLEAN="cd src/; sed -i '1s/^/EXTRA_CFLAGS := -fno-pie/' Makefile && make clean"
MAKE="cd src/; make BUILD_KERNEL=${kernelver}"
BUILT_MODULE_LOCATION[0]="src/"
BUILT_MODULE_NAME[0]="ixgbevf"
DEST_MODULE_LOCATION[0]="/updates"
DEST_MODULE_NAME[0]="ixgbevf"
AUTOINSTALL="yes"
""" % (version))

    for task in ['add', 'build', 'install']:
        # Invoke DKMS task using specified kernel module (-m) and version (-v)
        log_check_call(['chroot', info.root,
                        'dkms', task,
                        '-m', 'ixgbevf',
                        '-v', version,
                        '-k', info.kernel_version])
def run(cls, info):
    """Bundle the volume image for an S3-backed AMI via euca-bundle-image."""
    bundle_name = "bundle-{id}".format(id=info.run_id)
    info._ec2["bundle_path"] = os.path.join(info.workspace, bundle_name)
    # Translate the Debian architecture name into EC2's
    arch = {"i386": "i386", "amd64": "x86_64"}.get(info.manifest.system["architecture"])
    log_check_call(["euca-bundle-image",
                    "--image", info.volume.image_path,
                    "--arch", arch,
                    "--user", info.credentials["user-id"],
                    "--privatekey", info.credentials["private-key"],
                    "--cert", info.credentials["certificate"],
                    "--ec2cert", cert_ec2,
                    "--destination", info._ec2["bundle_path"],
                    "--prefix", info._ec2["ami_name"]])
def run(cls, info):
    """Install salt into the chroot using the saltstack bootstrap script."""
    # Download bootstrap script
    script_path = os.path.join(info.root, 'install_salt.sh')
    with open(script_path, 'w') as f:
        response = urllib.urlopen('http://bootstrap.saltstack.org')
        f.write(response.read())

    # This is needed since bootstrap doesn't handle -X for debian distros properly.
    # We disable checking for running services at end since we do not start them.
    sed_i(script_path, 'install_debian_check_services', "disabled_debian_check_services")

    salt_conf = info.manifest.plugins['salt']
    bootstrap_command = ['chroot', info.root, 'bash', 'install_salt.sh', '-X']
    if 'master' in salt_conf:
        bootstrap_command.extend(['-A', salt_conf['master']])

    install_source = salt_conf['install_source']
    bootstrap_command.append(install_source)
    # A git install may pin a specific salt version
    if install_source == 'git' and ('version' in salt_conf):
        bootstrap_command.append(salt_conf['version'])
    log_check_call(bootstrap_command)
def run(cls, info):
    """Upload the AMI bundle to S3 with euca-upload-bundle."""
    manifest_file = os.path.join(info._ec2["bundle_path"],
                                 info._ec2["ami_name"] + ".manifest.xml")
    region = info._ec2["host"]["region"]
    # The S3 endpoint depends on the region (and the Chinese partition
    # lives under a different TLD)
    if region == "us-east-1":
        s3_url = "https://s3.amazonaws.com/"
    elif region == "cn-north-1":
        s3_url = "https://s3.cn-north-1.amazonaws.com.cn"
    else:
        s3_url = "https://s3-{region}.amazonaws.com/".format(region=region)
    info._ec2["manifest_location"] = (info.manifest.image["bucket"] + "/" +
                                      info._ec2["ami_name"] + ".manifest.xml")
    log_check_call(["euca-upload-bundle",
                    "--bucket", info.manifest.image["bucket"],
                    "--manifest", manifest_file,
                    "--access-key", info.credentials["access-key"],
                    "--secret-key", info.credentials["secret-key"],
                    "--url", s3_url,
                    "--region", region,
                    "--ec2cert", cert_ec2])
def run(cls, info):
    """Record the chroot's lsb_release identity strings in info._gce."""
    def query(flag):
        # lsb_release -s prints a single line; take the first output line
        return log_check_call(['chroot', info.root, 'lsb_release', flag, '-s'])[0]
    info._gce['lsb_distribution'] = query('-i')
    info._gce['lsb_description'] = query('-d')
    info._gce['lsb_release'] = query('-r')
def run(cls, info):
    """Install the expand-root script and systemd unit into the image and
    enable the service."""
    script_src = os.path.join(ASSETS_DIR, 'expand-root.sh')
    service_src = os.path.join(ASSETS_DIR, 'expand-root.service')
    script_dst = os.path.join(info.root, 'usr/bin/expand-root.sh')
    service_dst = os.path.join(info.root, 'lib/systemd/system/expand-root.service')

    plugin_conf = info.manifest.plugins['expand_root']
    filesystem_type = plugin_conf.get('filesystem_type')
    root_device = plugin_conf.get('root_device')
    root_partition = plugin_conf.get('root_partition')

    # Copy files over
    shutil.copy(script_src, script_dst)
    os.chmod(script_dst, 0o750)
    shutil.copy(service_src, service_dst)

    # Bake device/partition/fs into the unit's ExecStart line
    opts = '%s %s %s' % (root_device, root_partition, filesystem_type)
    sed_i(service_dst, r'^ExecStart=/usr/bin/expand-root.sh.*$',
          'ExecStart=/usr/bin/expand-root.sh %s' % opts)

    # Enable systemd service
    log_check_call(['chroot', info.root,
                    'systemctl', 'enable', 'expand-root.service'])
def run(cls, info):
    """Append the configured admin public key to the chroot user's
    authorized_keys and hand ownership of ~/.ssh to that user."""
    if 'ec2-get-credentials' in info.initd['install']:
        log.warn('You are using a static public key for the admin account.'
                 'This will conflict with the ec2 public key injection mechanism.'
                 'The ec2-get-credentials startup script will therefore not be enabled.')
        del info.initd['install']['ec2-get-credentials']

    username = info.manifest.plugins['admin_user']['username']
    with open(info.manifest.plugins['admin_user']['pubkey']) as pubkey_handle:
        pubkey = pubkey_handle.read()

    # Image-relative paths and their host-side (chroot-prefixed) twins
    ssh_dir_rel = os.path.join('home', username, '.ssh')
    auth_keys_rel = os.path.join(ssh_dir_rel, 'authorized_keys')
    ssh_dir_abs = os.path.join(info.root, ssh_dir_rel)
    auth_keys_abs = os.path.join(info.root, auth_keys_rel)

    # Create the ssh dir if nobody has created it yet
    if not os.path.exists(ssh_dir_abs):
        os.mkdir(ssh_dir_abs, 0o700)

    # Append so keys from earlier tasks survive; then chmod u=rw,go=
    import stat
    with open(auth_keys_abs, 'a') as auth_keys_handle:
        auth_keys_handle.write(pubkey + '\n')
    os.chmod(auth_keys_abs, (stat.S_IRUSR | stat.S_IWUSR))

    # Ownership must be set through chroot — the user only exists in the image
    from bootstrapvz.common.tools import log_check_call
    log_check_call(['chroot', info.root,
                    'chown', '-R', (username + ':' + username), ssh_dir_rel])
def run(cls, info):
    """Run each manifest-defined image command, substituting {root} and
    the manifest variables into every argument."""
    from bootstrapvz.common.tools import log_check_call
    for raw_command in info.manifest.plugins['image_commands']['commands']:
        command = [part.format(root=info.root, **info.manifest_vars)
                   for part in raw_command]
        log_check_call(command)
def run(cls, info):
    """Fetch ixgbevf 2.16.4 into the chroot, unpack under /usr/src and
    register it with DKMS for the image's kernel."""
    version = '2.16.4'
    drivers_url = 'http://downloads.sourceforge.net/project/e1000/ixgbevf stable/%s/ixgbevf-%s.tar.gz' % (version, version)
    archive = os.path.join(info.root, 'tmp', 'ixgbevf-%s.tar.gz' % (version))
    module_path = os.path.join(info.root, 'usr', 'src', 'ixgbevf-%s' % (version))

    import urllib
    urllib.urlretrieve(drivers_url, archive)

    from bootstrapvz.common.tools import log_check_call
    log_check_call(['tar', '--ungzip', '--extract',
                    '--file', archive,
                    '--directory', os.path.join(info.root, 'usr', 'src')])

    with open(os.path.join(module_path, 'dkms.conf'), 'w') as dkms_conf:
        dkms_conf.write("""PACKAGE_NAME="ixgbevf"
PACKAGE_VERSION="%s"
CLEAN="cd src/; make clean"
MAKE="cd src/; make BUILD_KERNEL=${kernelver}"
BUILT_MODULE_LOCATION[0]="src/"
BUILT_MODULE_NAME[0]="ixgbevf"
DEST_MODULE_LOCATION[0]="/updates"
DEST_MODULE_NAME[0]="ixgbevf"
AUTOINSTALL="yes"
""" % (version))

    for task in ['add', 'build', 'install']:
        # Invoke DKMS task using specified kernel module (-m) and version (-v)
        log_check_call(['chroot', info.root,
                        'dkms', task,
                        '-m', 'ixgbevf',
                        '-v', version,
                        '-k', info.kernel_version])
def run(cls, info):
    """Purge the apt package cache and downloaded list files in the chroot."""
    log_check_call(["chroot", info.root, "apt-get", "clean"])
    lists_dir = os.path.join(info.root, "var/lib/apt/lists")
    candidates = (os.path.join(lists_dir, name) for name in os.listdir(lists_dir))
    for candidate in candidates:
        # Skip subdirectories such as 'partial'
        if os.path.isfile(candidate):
            os.remove(candidate)
def _before_create(self, e):
    """Create the logical volume that backs this image."""
    self.image_path = e.image_path
    size_mib = str(self.size.bytes.get_qty_in('MiB'))
    log_check_call(['lvcreate',
                    '--size', '{mib}M'.format(mib=size_mib),
                    '--name', self.lv,
                    self.vg])
def boot_image(manifest, build_server, bootstrap_info):
    """Context-manager generator that makes the bootstrapped docker image
    available locally and yields a running container for it.

    If the build server is remote, the image is exported there, downloaded,
    loaded into the local docker daemon and the remote copies are cleaned
    up. The image itself is removed again after the ``with`` block exits.

    :param manifest: The manifest of the build (unused here directly)
    :param build_server: Build server the image was created on
    :param bootstrap_info: Bootstrap info object; ``_docker['image_id']``
        identifies the image to boot
    """
    image_id = None
    try:
        import os
        from bootstrapvz.common.tools import log_check_call
        # If a docker-machine is configured, resolve its connection settings
        # so the local docker client talks to that machine
        docker_machine = build_server.run_settings.get('docker', {}).get('machine', None)
        docker_env = os.environ.copy()
        if docker_machine is not None:
            cmd = ('eval "$(docker-machine env {machine})" && '
                   'echo $DOCKER_HOST && echo $DOCKER_CERT_PATH && echo $DOCKER_TLS_VERIFY'
                   .format(machine=docker_machine))
            # The three echoed values come back as one output line each
            [docker_host, docker_cert_path, docker_tls] = log_check_call([cmd], shell=True)
            docker_env['DOCKER_TLS_VERIFY'] = docker_tls
            docker_env['DOCKER_HOST'] = docker_host
            docker_env['DOCKER_CERT_PATH'] = docker_cert_path
            docker_env['DOCKER_MACHINE_NAME'] = docker_machine
        from bootstrapvz.remote.build_servers.local import LocalBuildServer
        image_id = bootstrap_info._docker['image_id']
        if not isinstance(build_server, LocalBuildServer):
            # Remote build: export the image on the build server, pull it
            # down and load it into the local docker daemon
            import tempfile
            handle, image_path = tempfile.mkstemp()
            os.close(handle)
            remote_image_path = os.path.join('/tmp', image_id)
            try:
                log.debug('Saving remote image to file')
                build_server.remote_command([
                    'sudo', 'docker', 'save',
                    '--output=' + remote_image_path,
                    image_id,
                ])
                log.debug('Downloading remote image')
                build_server.download(remote_image_path, image_path)
                log.debug('Importing image')
                log_check_call(['docker', 'load', '--input=' + image_path], env=docker_env)
            except (Exception, KeyboardInterrupt):
                raise
            finally:
                # Always clean up the exported tarballs, remote and local
                log.debug('Deleting exported image from build server and locally')
                build_server.delete(remote_image_path)
                os.remove(image_path)
                log.debug('Deleting image from build server')
                build_server.remote_command([
                    'sudo', 'docker', 'rmi',
                    bootstrap_info._docker['image_id']
                ])
        from image import Image
        # Yield a running container wrapping the image for the caller's use
        with Image(image_id, docker_env) as container:
            yield container
    finally:
        # Remove the local image once the consumer is done with it
        if image_id is not None:
            log.debug('Deleting image')
            log_check_call(['docker', 'rmi', image_id], env=docker_env)
def run(cls, info):
    """Keep a backup copy of the bootstrapped volume folder in the workspace."""
    folder_backup_name = '{id}.{ext}.backup'.format(id=info.run_id,
                                                    ext=info.volume.extension)
    destination = os.path.join(info.manifest.bootstrapper['workspace'],
                               folder_backup_name)
    # cp -a preserves permissions, ownership and timestamps
    log_check_call(['cp', '-a', info.volume.path, destination])
    log.info('A copy of the bootstrapped volume was created. Path: ' + destination)
def validate_manifest(data, validator, error):
    """Validate the debconf plugin section: schema check first, then let
    debconf-set-selections dry-run (--checkonly) the selections payload."""
    import os.path
    from bootstrapvz.common.tools import log_check_call
    validator(data, os.path.join(os.path.dirname(__file__), 'schema.yaml'))
    log_check_call(['debconf-set-selections', '--checkonly'],
                   stdin=data['plugins']['debconf'])
def run(cls, info):
    """Register the uploaded tarball as a GCE image via gcutil addimage."""
    # An explicit manifest description wins over the lsb_release text
    if 'description' in info.manifest.image:
        image_description = info.manifest.image['description']
    else:
        image_description = info._gce['lsb_description']
    log_check_call(['gcutil',
                    '--project=' + info.manifest.image['gce_project'],
                    'addimage', info._gce['image_name'],
                    info.manifest.image['gcs_destination'] + info._gce['tarball_name'],
                    '--description=' + image_description])
def _before_mount(self, e):
    """Mount this partition's filesystem at the event's destination."""
    log_check_call(['mount', '--types', self.filesystem,
                    self.device_path, e.destination])
    # Remember the mountpoint so unmount can find it later
    self.mount_dir = e.destination
def run(cls, info):
    """Apply the udev workaround patch to the initramfs init-top script.

    c.f. http://anonscm.debian.org/cgit/pkg-systemd/systemd.git/commit/?id=61e055638cea
    """
    from bootstrapvz.common.tools import log_check_call
    from . import assets
    target = os.path.join(info.root, "usr/share/initramfs-tools/scripts/init-top/udev")
    patch_file = os.path.join(assets, "udev.diff")
    log_check_call(["patch", "--no-backup-if-mismatch", target, patch_file])
def run(cls, info):
    """Reuse a prebootstrapped root folder: copy it into the workspace and
    mark the volume's state machine as already attached."""
    info.root = os.path.join(info.workspace, 'root')
    log_check_call(['cp', '-a',
                    info.manifest.plugins['prebootstrapped']['folder'],
                    info.root])
    info.volume.path = info.root
    # Skip the usual create/attach transitions — the folder *is* the volume
    info.volume.fsm.current = 'attached'
def run(cls, info):
    """Download the Google Cloud SDK tarball, unpack it into the image and
    symlink its programs into /usr/local/bin.

    The tarball to fetch is discovered dynamically from the release
    site's sha1.txt file list.
    """
    import contextlib
    import re
    import urllib
    import urlparse

    # The current download URL needs to be determined dynamically via a sha1sum file. Here's the
    # necessary logic.
    cloudsdk_download_site = 'https://dl.google.com/dl/cloudsdk/release/'
    cloudsdk_filelist_url = urlparse.urljoin(cloudsdk_download_site, 'sha1.txt')
    cloudsdk_pathname_regexp = r'^packages/google-cloud-sdk-coretools-linux-[0-9]+\.tar\.gz$'
    cloudsdk_filename = ''  # This is set in the 'with' block below.
    with contextlib.closing(urllib.urlopen(cloudsdk_filelist_url)) as cloudsdk_filelist:
        # cloudsdk_filelist is in sha1sum format, so <hash><whitespace><pathname>
        # pathname is a suffix relative to cloudsdk_download_site
        #
        # Retrieve the pathname which matches cloudsdk_pathname_regexp. It's currently safe to
        # assume that only one pathname will match.
        for cloudsdk_filelist_line in cloudsdk_filelist:
            _, pathname = cloudsdk_filelist_line.split()
            if re.match(cloudsdk_pathname_regexp, pathname):
                # Don't use os.path.basename since we're actually parsing a URL
                # suffix, not a path. Same probable result, but wrong semantics.
                #
                # The format of pathname is already known to match
                # cloudsdk_pathname_regexp, so this is safe.
                _, cloudsdk_filename = pathname.rsplit('/', 1)
                break
    # NOTE(review): if no line matched, `pathname` here is simply the last
    # line of the file list and cloudsdk_filename stays '' — presumably a
    # match always exists; confirm against the release site.
    cloudsdk_download_dest = os.path.join(info.workspace, cloudsdk_filename)
    cloudsdk_url = urlparse.urljoin(cloudsdk_download_site, pathname)
    urllib.urlretrieve(cloudsdk_url, cloudsdk_download_dest)

    # Make a "mental note" of which file to remove in the system cleaning phase.
    info._google_cloud_sdk['tarball_pathname'] = cloudsdk_download_dest

    cloudsdk_directory = os.path.join(info.root, 'usr/local/share/google')
    os.makedirs(cloudsdk_directory)
    log_check_call(['tar', 'xaf', cloudsdk_download_dest,
                    '-C', cloudsdk_directory])

    # We need to symlink certain programs from the Cloud SDK bin directory into /usr/local/bin.
    # Keep a list and do it in a unified way. Naturally this will go away with proper packaging.
    gcloud_programs = ['bq', 'gsutil', 'gcutil', 'gcloud', 'git-credential-gcloud.sh']
    for prog in gcloud_programs:
        # Relative link target so the symlink stays valid inside the image
        src = os.path.join('..', 'share', 'google', 'google-cloud-sdk', 'bin', prog)
        dest = os.path.join(info.root, 'usr', 'local', 'bin', prog)
        os.symlink(src, dest)
def run(cls, info):
    """Record the installed pip and pip3 packages of the chroot.

    Stores the `pip list --format freeze` output for both interpreters in
    the EC2 metadata so it can be inspected later.

    :param Info info: The bootstrap information object
    """
    from bootstrapvz.common.tools import log_check_call
    pip_command = ["chroot %s pip list --format freeze" % (info.root)]
    pips = log_check_call(pip_command, shell=True)
    info._ec2_metadata['pips'] = pips
    pip3_command = ["chroot %s pip3 list --format freeze" % (info.root)]
    # Bug fix: the original re-ran the pip command here (`command` instead of
    # `command3`), so the pip3 listing silently duplicated the pip listing.
    pips3 = log_check_call(pip3_command, shell=True)
    info._ec2_metadata['pips3'] = pips3
def unmount(self):
    """Perform the unmount operation or ask the partition to unmount itself.
    """
    # Partitions know how to unmount themselves; anything else
    # (e.g. a plain directory bind) is unmounted directly.
    source_is_partition = isinstance(self.source, AbstractPartition)
    if source_is_partition:
        self.source.unmount()
    else:
        log_check_call(['umount', self.mount_dir])
    # Forget the mount directory now that nothing is mounted there
    del self.mount_dir
def _before_create(self, event):
    """Create an msdos partition table on the volume, then each partition.

    :param _e_obj event: Event object carrying the volume to partition
    """
    volume = event.volume
    # Disk alignment still plays a role in virtualized environment,
    # but I honestly have no clue as to what best practice is here,
    # so we choose 'none'
    label_command = ['parted', '--script', '--align', 'none',
                     volume.device_path, '--', 'mklabel', 'msdos']
    log_check_call(label_command)
    # With the partition table in place, create each partition in order
    for part in self.partitions:
        part.create(volume)
def _before_unmount(self, e):
    """Unmount any mounts associated with this partition
    """
    # Unmount deepest mountpoints first (descending path length):
    # you cannot unmount /dev before you have unmounted /dev/pts
    for mount_point in sorted(self.mounts, key=len, reverse=True):
        self.mounts[mount_point].unmount()
    # Finally unmount the partition itself and forget its mount directory
    log_check_call(['umount', self.mount_dir])
    del self.mount_dir
def run(cls, info):
    """Fetch the Google apt signing key and register it inside the chroot.

    :param Info info: The bootstrap information object
    """
    key_file = os.path.join(info.root, 'google.gpg.key')
    download_command = ['wget',
                        'https://packages.cloud.google.com/apt/doc/apt-key.gpg',
                        '-O', key_file]
    log_check_call(download_command)
    # The key sits at the chroot root, so inside the chroot it is just the basename
    log_check_call(['chroot', info.root, 'apt-key', 'add', 'google.gpg.key'])
    # The key is registered in apt's keyring; the file itself is no longer needed
    os.remove(key_file)
def run(cls, info):
    """Upload the image tarball to the configured GCS destination via gsutil.

    :param Info info: The bootstrap information object
    """
    # Destination is the GCS prefix from the manifest plus the tarball name
    destination = (info.manifest.provider['gcs_destination'] +
                   info._gce['tarball_name'])
    log_check_call(['gsutil', 'cp', info._gce['tarball_path'], destination])
def run(cls, info):
    """Patch the initramfs-tools udev init-top script.

    c.f. http://anonscm.debian.org/cgit/pkg-systemd/systemd.git/commit/?id=61e055638cea

    :param Info info: The bootstrap information object
    """
    from bootstrapvz.common.tools import log_check_call
    from . import assets
    udev_script = os.path.join(info.root,
                               'usr/share/initramfs-tools/scripts/init-top/udev')
    patch_file = os.path.join(assets, 'udev.diff')
    log_check_call(['patch', '--no-backup-if-mismatch', udev_script, patch_file])
def run(cls, info):
    """Pack the volume image into a sparse gzipped tarball for Oracle.

    :param Info info: The bootstrap information object
    """
    workspace = info.manifest.bootstrapper['workspace']
    image_name = info.manifest.name.format(**info.manifest_vars)
    image_filename = image_name + '.' + info.volume.extension
    tarball_path = os.path.join(workspace, image_name + '.tar.gz')
    # Remember where the tarball lives for the upload step
    info._oracle['tarball_path'] = tarball_path
    # --sparse keeps holes in the image from bloating the archive;
    # -a picks the compression from the .tar.gz suffix
    log_check_call(['tar', '--sparse', '-C', workspace,
                    '-caf', tarball_path, image_filename])
def download(self, src, dst):
    """Copy a file from the remote build server to a local path via scp.

    :param str src: Path of the file on the build server
    :param str dst: Local destination path
    """
    log.debug('Downloading file `{src}\' from '
              'build server `{name}\' to `{dst}\''
              .format(src=src, dst=dst, name=self.name))
    # chown the remote file so our ssh user is able to read it
    self._remote_command(['sudo', 'chown', self.username, src])
    remote_spec = '{user}@{host}:{path}'.format(user=self.username,
                                                host=self.address,
                                                path=src)
    scp_command = ['scp', '-i', self.keyfile, '-P', str(self.port),
                   remote_spec, dst]
    log_check_call(scp_command)
def run(cls, info):
    """Fetch the goog-repo apt signing key and register it inside the chroot.

    :param Info info: The bootstrap information object
    """
    key_file = os.path.join(info.root, 'google.gpg.key')
    download_command = ['wget',
                        'https://goog-repo.appspot.com/debian/key/public.gpg.key',
                        '-O', key_file]
    log_check_call(download_command)
    # Inside the chroot the key path is relative to the chroot root
    log_check_call(['chroot', info.root, 'apt-key', 'add', 'google.gpg.key'])
    # Once registered with apt-key the file itself is no longer needed
    os.remove(key_file)
def run(cls, info):
    """Set the root password inside the chroot.

    A pre-hashed value (`password-crypted`) takes precedence over the
    plaintext `password` setting.

    :param Info info: The bootstrap information object
    """
    from bootstrapvz.common.tools import log_check_call
    plugin_settings = info.manifest.plugins['root_password']
    crypted = plugin_settings.get('password-crypted', None)
    if crypted is None:
        # Plaintext password: chpasswd hashes it itself
        log_check_call(['chroot', info.root, '/usr/sbin/chpasswd'],
                       'root:' + plugin_settings['password'])
    else:
        # --encrypted tells chpasswd the supplied password is already hashed
        log_check_call(['chroot', info.root, '/usr/sbin/chpasswd', '--encrypted'],
                       'root:' + crypted)
def run(cls, info):
    """Register the uploaded tarball as a GCE image via gcloud.

    :param Info info: The bootstrap information object
    """
    # A description in the manifest overrides the LSB one gathered earlier
    if 'description' in info.manifest.provider:
        description = info.manifest.provider['description']
    else:
        description = info._gce['lsb_description']
    description = description.format(**info.manifest_vars)
    source_uri = (info.manifest.provider['gcs_destination'] +
                  info._gce['tarball_name'])
    log_check_call(['gcloud', 'compute',
                    '--project=' + info.manifest.provider['gce_project'],
                    'images', 'create', info._gce['image_name'],
                    '--source-uri=' + source_uri,
                    '--description=' + description])
def run(cls, info):
    """Pack the volume image into a sparse gzipped tarball.

    Also records the tarball name and path on `info._gce` — the upload and
    image-registration tasks read `info._gce['tarball_name']` and
    `info._gce['tarball_path']`, which the original version never set.

    :param Info info: The bootstrap information object
    """
    workspace = info.manifest.bootstrapper["workspace"]
    image_name = info.manifest.name.format(**info.manifest_vars)
    filename = image_name + "." + info.volume.extension
    tarball_name = image_name + ".tar.gz"
    tarball_path = os.path.join(workspace, tarball_name)
    # Consistency fix: remember the tarball for the later GCS upload /
    # image creation steps instead of discarding these values.
    info._gce['tarball_name'] = tarball_name
    info._gce['tarball_path'] = tarball_path
    # --sparse keeps holes in the image from bloating the archive;
    # -a picks the compression from the .tar.gz suffix
    log_check_call(["tar", "--sparse", "-C", workspace,
                    "-caf", tarball_path, filename])
def _before_mount(self, e):
    """Mount the partition at the destination carried by the event.

    :param _e_obj e: Event object with the `destination` mount point
    """
    mount_command = ['mount', '--types', self.filesystem,
                     self.device_path, e.destination]
    if self.mountopts is not None:
        # Splice the mount options in directly after the command name
        mount_command[1:1] = ['--options', ",".join(self.mountopts)]
    # Mount the partition
    log_check_call(mount_command)
    self.mount_dir = e.destination
def run(cls, info):
    """Gather LSB release information from the chroot for later image metadata.

    :param Info info: The bootstrap information object
    """
    def lsb_value(flag):
        # lsb_release -s prints a single line; log_check_call returns
        # the output as a list of lines, so take the first one
        return log_check_call(
            ['chroot', info.root, 'lsb_release', flag, '-s'])[0]

    info._gce['lsb_distribution'] = lsb_value('-i')
    info._gce['lsb_description'] = lsb_value('-d')
    info._gce['lsb_release'] = lsb_value('-r')