def __call__(self, config, sectionname):
    """Parse a multi-line mount specification into a tuple of option dicts.

    Each non-empty line of the massaged config value is a whitespace-separated
    list of ``key=value`` pairs; one dict is produced per line.  The values of
    the ``create`` and ``ro`` options are coerced to booleans.

    :raises ValueError: for a pair without ``=``, or a non-boolean value for
        ``create``/``ro``.
    """
    value = BaseMassager.__call__(self, config, sectionname)
    mounts = []
    for line in value.splitlines():
        mount_options = line.split()
        if not mount_options:
            continue
        options = {}
        for mount_option in mount_options:
            if '=' not in mount_option:
                raise ValueError(
                    "Mount option '%s' contains no equal sign." % mount_option)
            # maxsplit=1 so values containing '=' don't break unpacking
            (key, option_value) = mount_option.split('=', 1)
            (key, option_value) = (key.strip(), option_value.strip())
            if key in ('create', 'ro'):
                # keep the raw text around: the previous code overwrote the
                # value before interpolating it, so the error reported None
                raw = option_value
                option_value = value_asbool(raw)
                if option_value is None:
                    raise ValueError(
                        "Unknown value %s for option %s in %s of %s:%s." % (
                            raw, key, self.key, self.sectiongroupname, sectionname))
            options[key] = option_value
        mounts.append(options)
    return tuple(mounts)
def __call__(self, config, sectionname):
    """Parse a multi-line mount specification into a tuple of option dicts.

    Each non-empty line of the massaged config value is a whitespace-separated
    list of ``key=value`` pairs; one dict is produced per line.  The values of
    the ``create`` and ``ro`` options are coerced to booleans.

    :raises ValueError: for a pair without ``=``, or a non-boolean value for
        ``create``/``ro``.
    """
    value = BaseMassager.__call__(self, config, sectionname)
    mounts = []
    for line in value.splitlines():
        mount_options = line.split()
        if not mount_options:
            continue
        options = {}
        for mount_option in mount_options:
            if "=" not in mount_option:
                raise ValueError("Mount option '%s' contains no equal sign." % mount_option)
            # maxsplit=1 so values containing '=' don't break unpacking
            (key, option_value) = mount_option.split("=", 1)
            (key, option_value) = (key.strip(), option_value.strip())
            if key in ("create", "ro"):
                # keep the raw text around: the previous code overwrote the
                # value before interpolating it, so the error reported None
                raw = option_value
                option_value = value_asbool(raw)
                if option_value is None:
                    raise ValueError(
                        "Unknown value %s for option %s in %s of %s:%s."
                        % (raw, key, self.key, self.sectiongroupname, sectionname)
                    )
            options[key] = option_value
        mounts.append(options)
    return tuple(mounts)
def update_backend(use_pypi=False, index='dev', build=True, user=None, version=None):
    """Install the backend on the target host and restart the service.

    The package is fetched either from PyPI (``use_pypi``) or from the given
    devpi ``index``.  ``version`` pins a release; when ``None`` the latest
    version is installed.  With ``build`` enabled the application is first
    built and uploaded from local sources, which requires a full backend
    development environment on the machine running this command (pyramid etc.)
    """
    get_vars()
    if value_asbool(build):
        upload_backend(index=index, user=user)
    # assemble the pip command up front; fab.cd only affects remote execution
    if value_asbool(use_pypi):
        pip_command = 'bin/pip install --upgrade briefkasten'
    else:
        pip_command = 'bin/pip install --upgrade --pre -i {ploy_default_publish_devpi}/briefkasten/{index}/+simple/ briefkasten'.format(
            index=index, user=user, **AV)
    if version:
        pip_command = '%s==%s' % (pip_command, version)
    with fab.cd('{apphome}'.format(**AV)):
        fab.sudo(pip_command)
    briefkasten_ctl('restart')
def reset_cleansers(confirm=True):
    """Destroy all cleanser slave jails, their rollback snapshots and the
    initial master snapshot, so that a re-run of the jailhost deployment can
    recreate fresh cleansers."""
    if value_asbool(confirm) and not yesno("""\nObacht! This will destroy any existing and or currently running cleanser jails. Are you sure that you want to continue?"""):
        exit("Glad I asked...")
    get_vars()
    # NOTE(review): assumes ploy_cleanser_count is an int -- confirm it is not a string
    cleanser_count = AV['ploy_cleanser_count']
    # keep the workers from interfering while we tear things down
    fab.run('ezjail-admin stop worker')
    # stop and nuke the cleanser slaves
    for slave_number in range(cleanser_count):
        cindex = '%02d' % (slave_number + 1)
        fab.run('ezjail-admin stop cleanser_' + cindex)
        with fab.warn_only():
            fab.run('zfs destroy tank/jails/cleanser_%s@jdispatch_rollback' % cindex)
        fab.run('ezjail-admin delete -fw cleanser_' + cindex)
        fab.run('umount -f /usr/jails/cleanser_' + cindex)
        fab.run('rm -rf /usr/jails/cleanser_' + cindex)
    with fab.warn_only():
        # remove master snapshot
        fab.run('zfs destroy -R tank/jails/cleanser@clonesource')
    # restart worker and cleanser to prepare for subsequent ansible configuration runs
    fab.run('ezjail-admin start worker')
    fab.run('ezjail-admin stop cleanser')
    fab.run('ezjail-admin start cleanser')
def reset_jails(confirm=True, keep_cleanser_master=True):
    """Stop, delete and re-create all jails.

    Since the cleanser master jail is rather large, it is kept by default.
    """
    if value_asbool(confirm) and not yesno("""\nObacht! This will destroy all existing and or currently running jails on the host. Are you sure that you want to continue?"""):
        exit("Glad I asked...")
    reset_cleansers(confirm=False)
    doomed_jails = ['appserver', 'webserver', 'worker']
    if not value_asbool(keep_cleanser_master):
        doomed_jails.append('cleanser')
    with fab.warn_only():
        for jail_name in doomed_jails:
            fab.run('ezjail-admin delete -fw {jail}'.format(jail=jail_name))
        # remove authorized keys for no longer existing key (they are regenerated for each new worker)
        fab.run('rm /usr/jails/cleanser/usr/home/cleanser/.ssh/authorized_keys')
def reset_jails(confirm=True, keep_cleanser_master=True):
    """Stop, delete and re-create all jails; the (large) cleanser master jail
    is omitted by default."""
    if value_asbool(confirm) and not yesno("""\nObacht! This will destroy all existing and or currently running jails on the host. Are you sure that you want to continue?"""):
        exit("Glad I asked...")
    reset_cleansers(confirm=False)
    # build the kill list; the cleanser master only joins it on request
    base_jails = ['appserver', 'webserver', 'worker']
    jails = base_jails if value_asbool(keep_cleanser_master) else base_jails + ['cleanser']
    with fab.warn_only():
        for jail in jails:
            fab.run('ezjail-admin delete -fw {jail}'.format(jail=jail))
        # stale authorized keys are useless: they are regenerated for each new worker
        fab.run(
            'rm /usr/jails/cleanser/usr/home/cleanser/.ssh/authorized_keys')
def update_backend(clean=False, build=True, config_path='production.ini', **kwargs):
    """Build the backend, upload it to the remote server, install it there and
    restart the service."""
    remote_home = '/home/halfnarp/backend/'
    with fab.lcd('../backend'):
        fab.put(config_path, remote_home + 'production.ini')
        if value_asbool(build):
            # a missing dist/ is fine, hence warn_only for the cleanup
            with fab.settings(warn_only=True):
                fab.local('rm dist/*.tar.gz')
            fab.local('make sdist')
        fab.put('dist/*.tar.gz', '/tmp/backend.tgz')
        fab.put('requirements.txt', remote_home + 'requirements.txt')
    with fab.cd(remote_home):
        fab.sudo('bin/pip install --upgrade --allow-external argparse -r requirements.txt', user='******')
        fab.sudo('bin/pip install --upgrade --force-reinstall --no-deps /tmp/backend.tgz', user='******')
        fab.sudo('service halfnarp_backend restart', warn_only=True)
def update_backend(clean=False, build=True, config_path="production.ini", **kwargs):
    """Build the backend locally, ship it to the remote server, install it
    there and restart the service."""
    with fab.lcd("../backend"):
        fab.put(config_path, "/home/halfnarp/backend/production.ini")
        if value_asbool(build):
            # cleaning up old tarballs may fail when dist/ is empty -- that's fine
            with fab.settings(warn_only=True):
                fab.local("rm dist/*.tar.gz")
            fab.local("make sdist")
        fab.put("dist/*.tar.gz", "/tmp/backend.tgz")
        fab.put("requirements.txt", "/home/halfnarp/backend/requirements.txt")
    install_commands = [
        "bin/pip install --upgrade --allow-external argparse -r requirements.txt",
        "bin/pip install --upgrade --force-reinstall --no-deps /tmp/backend.tgz",
    ]
    with fab.cd("/home/halfnarp/backend/"):
        for command in install_commands:
            fab.sudo(command, user="******")
        fab.sudo("service halfnarp_backend restart", warn_only=True)
def reset_cleansers(confirm=True):
    """Destroy every cleanser slave jail together with its rollback snapshot
    and the initial master snapshot, allowing the jailhost deployment to be
    re-run and fresh cleansers to be created."""
    if value_asbool(confirm) and not yesno("""\nObacht! This will destroy any existing and or currently running cleanser jails. Are you sure that you want to continue?"""):
        exit("Glad I asked...")
    get_vars()
    cleanser_count = AV['ploy_cleanser_count']
    # make sure no workers interfere:
    fab.run('ezjail-admin stop worker')
    # stop and nuke the cleanser slaves
    for cleanser_index in range(cleanser_count):
        cindex = '{0:02d}'.format(cleanser_index + 1)
        fab.run('ezjail-admin stop cleanser_{0}'.format(cindex))
        with fab.warn_only():
            fab.run('zfs destroy tank/jails/cleanser_{0}@jdispatch_rollback'.format(cindex))
        fab.run('ezjail-admin delete -fw cleanser_{0}'.format(cindex))
        fab.run('umount -f /usr/jails/cleanser_{0}'.format(cindex))
        fab.run('rm -rf /usr/jails/cleanser_{0}'.format(cindex))
    with fab.warn_only():
        # remove master snapshot
        fab.run('zfs destroy -R tank/jails/cleanser@clonesource')
    # restart worker and cleanser to prepare for subsequent ansible configuration runs
    fab.run('ezjail-admin start worker')
    fab.run('ezjail-admin stop cleanser')
    fab.run('ezjail-admin start cleanser')
def update_backend(clean=False, build=True, config_path='production.ini', **kwargs):
    """Build the backend, upload it to the remote server, install it there and
    restart it."""
    def _build_sdist():
        # purge any previous tarball; failure is fine when dist/ is empty
        with fab.settings(warn_only=True):
            fab.local('rm dist/*.tar.gz')
        fab.local('make sdist')
    with fab.lcd('../backend'):
        fab.put(config_path, '/home/halfnarp/backend/production.ini')
        if value_asbool(build):
            _build_sdist()
        fab.put('dist/*.tar.gz', '/tmp/backend.tgz')
        fab.put('requirements.txt', '/home/halfnarp/backend/requirements.txt')
    with fab.cd('/home/halfnarp/backend/'):
        fab.sudo('bin/pip install --upgrade --allow-external argparse -r requirements.txt', user='******')
        fab.sudo('bin/pip install --upgrade --force-reinstall --no-deps /tmp/backend.tgz', user='******')
        fab.sudo('service halfnarp_backend restart', warn_only=True)
def _bootstrap():
    """Install FreeBSD onto the host currently booted into mfsbsd.

    Sequence: sanity checks (install URL, disks, interfaces, rc.conf
    contents), interactive confirmation, zfsinstall onto the selected
    devices, upload of the bootstrap files, installation of the bootstrap
    packages and - when configured - a final reboot.  All tunables come
    from ``env.instance.config``.
    """
    bu = BootstrapUtils()
    bu.generate_ssh_keys()
    bu.print_bootstrap_files()
    # gather infos
    if not bu.bsd_url:
        print(
            "Found no FreeBSD system to install, please specify bootstrap-bsd-url and make sure mfsbsd is running"
        )
        return
    # get realmem here, because it may fail and we don't want that to happen
    # in the middle of the bootstrap
    realmem = bu.realmem
    print("\nFound the following disk devices on the system:\n %s" % ' '.join(bu.sysctl_devices))
    if bu.first_interface:
        print(
            "\nFound the following network interfaces, now is your chance to update your rc.conf accordingly!\n %s"
            % ' '.join(bu.phys_interfaces))
    else:
        print("\nWARNING! Found no suitable network interface!")
    template_context = {
        "ploy_jail_host_pkg_repository": "pkg+http://pkg.freeBSD.org/${ABI}/quarterly"
    }
    # first the config, so we don't get something essential overwritten
    template_context.update(env.instance.config)
    template_context.update(devices=bu.sysctl_devices, interfaces=bu.phys_interfaces, hostname=env.instance.id)
    rc_conf = bu.bootstrap_files['rc.conf'].read(template_context)
    if not rc_conf.endswith('\n'):
        print(
            "\nERROR! Your rc.conf doesn't end in a newline:\n==========\n%s<<<<<<<<<<\n" % rc_conf)
        return
    rc_conf_lines = rc_conf.split('\n')
    # ask before continuing when rc.conf doesn't configure an expected interface
    for interface in [
            bu.first_interface,
            env.instance.config.get('ansible-dhcp_host_sshd_interface')
    ]:
        if interface is None:
            continue
        ifconfig = 'ifconfig_%s' % interface
        for line in rc_conf_lines:
            if line.strip().startswith(ifconfig):
                break
        else:
            if not yesno(
                    "\nDidn't find an '%s' setting in rc.conf. You sure that you want to continue?"
                    % ifconfig):
                return
    yes = env.instance.config.get('bootstrap-yes', False)
    if not (yes or yesno(
            "\nContinuing will destroy the existing data on the following devices:\n %s\n\nContinue?"
            % ' '.join(bu.devices))):
        return
    # install FreeBSD in ZFS root
    devices_args = ' '.join('-d %s' % x for x in bu.devices)
    system_pool_name = env.instance.config.get('bootstrap-system-pool-name', 'system')
    data_pool_name = env.instance.config.get('bootstrap-data-pool-name', 'tank')
    swap_arg = ''
    # default swap: twice the physical memory (realmem presumably in MiB -- TODO confirm)
    swap_size = env.instance.config.get('bootstrap-swap-size', '%iM' % (realmem * 2))
    if swap_size:
        swap_arg = '-s %s' % swap_size
    system_pool_arg = ''
    system_pool_size = env.instance.config.get('bootstrap-system-pool-size', '20G')
    if system_pool_size:
        system_pool_arg = '-z %s' % system_pool_size
    run('destroygeom {devices_args} -p {system_pool_name} -p {data_pool_name}'.
        format(devices_args=devices_args, system_pool_name=system_pool_name, data_pool_name=data_pool_name))
    run('{env_vars}{zfsinstall} {devices_args} -p {system_pool_name} -V 28 -u {bsd_url} {swap_arg} {system_pool_arg}'
        .format(env_vars=bu.env_vars, zfsinstall=bu.zfsinstall, devices_args=devices_args,
                system_pool_name=system_pool_name, bsd_url=bu.bsd_url, swap_arg=swap_arg,
                system_pool_arg=system_pool_arg), shell=False)
    # create partitions for data pool, but only if the system pool doesn't use
    # the whole disk anyway
    if system_pool_arg:
        for device in bu.devices:
            run('gpart add -t freebsd-zfs -l {data_pool_name}_{device} {device}'
                .format(data_pool_name=data_pool_name, device=device))
    # mount devfs inside the new system
    if 'devfs on /rw/dev' not in bu.mounts:
        run('mount -t devfs devfs /mnt/dev')
    # setup bare essentials
    run('cp /etc/resolv.conf /mnt/etc/resolv.conf', warn_only=True)
    bu.create_bootstrap_directories()
    bu.upload_bootstrap_files(template_context)
    bootstrap_packages = ['python27']
    if value_asbool(env.instance.config.get('firstboot-update', 'false')):
        bootstrap_packages.append('firstboot-freebsd-update')
        run('''touch /mnt/firstboot''')
        run('''sysrc -f /mnt/etc/rc.conf firstboot_freebsd_update_enable=YES''')
    # we need to install python here, because there is no way to install it via
    # ansible playbooks
    bu.install_pkg('/mnt', chroot=True, packages=bootstrap_packages)
    # set autoboot delay
    autoboot_delay = env.instance.config.get('bootstrap-autoboot-delay', '-1')
    run('echo autoboot_delay=%s >> /mnt/boot/loader.conf' % autoboot_delay)
    bu.generate_remote_ssh_keys()
    # reboot
    if value_asbool(env.instance.config.get('bootstrap-reboot', 'true')):
        with settings(hide('warnings'), warn_only=True):
            run('reboot')
def bootstrap(**kwargs):
    """ bootstrap an instance booted into mfsbsd (http://mfsbsd.vx.sk)

    Sets up ssh access to the mfsbsd rescue system (fingerprint and
    password fallback), then performs the sanity checks, zfsinstall,
    bootstrap file upload and package installation; finally reboots when
    configured to.  Keyword arguments override ``env.instance.config``.
    """
    env.shell = '/bin/sh -c'
    # default ssh settings for mfsbsd with possible overwrite by bootstrap-fingerprint
    fingerprint = env.instance.config.get(
        'bootstrap-fingerprint',
        '02:2e:b4:dd:c3:8a:b7:7b:ba:b2:4a:f0:ab:13:f4:2d')
    env.instance.config['fingerprint'] = fingerprint
    env.instance.config['password-fallback'] = True
    env.instance.config['password'] = '******'
    # allow overwrites from the commandline
    env.instance.config.update(kwargs)
    bu = BootstrapUtils()
    bu.generate_ssh_keys()
    bu.print_bootstrap_files()
    # gather infos
    if not bu.bsd_url:
        print("Found no FreeBSD system to install, please specify bootstrap-bsd-url and make sure mfsbsd is running")
        return
    # get realmem here, because it may fail and we don't want that to happen
    # in the middle of the bootstrap
    realmem = bu.realmem
    print("\nFound the following disk devices on the system:\n %s" % ' '.join(bu.sysctl_devices))
    if bu.first_interface:
        print("\nFound the following network interfaces, now is your chance to update your rc.conf accordingly!\n %s" % ' '.join(bu.phys_interfaces))
    else:
        print("\nWARNING! Found no suitable network interface!")
    template_context = {}
    # first the config, so we don't get something essential overwritten
    template_context.update(env.instance.config)
    template_context.update(
        devices=bu.sysctl_devices,
        interfaces=bu.phys_interfaces,
        hostname=env.instance.id)
    rc_conf = bu.bootstrap_files['rc.conf'].read(template_context)
    if not rc_conf.endswith('\n'):
        print("\nERROR! Your rc.conf doesn't end in a newline:\n==========\n%s<<<<<<<<<<\n" % rc_conf)
        return
    rc_conf_lines = rc_conf.split('\n')
    # ask before continuing when rc.conf doesn't configure an expected interface
    for interface in [bu.first_interface, env.instance.config.get('ansible-dhcp_host_sshd_interface')]:
        if interface is None:
            continue
        ifconfig = 'ifconfig_%s' % interface
        for line in rc_conf_lines:
            if line.strip().startswith(ifconfig):
                break
        else:
            if not yesno("\nDidn't find an '%s' setting in rc.conf. You sure that you want to continue?" % ifconfig):
                return
    yes = env.instance.config.get('bootstrap-yes', False)
    if not (yes or yesno("\nContinuing will destroy the existing data on the following devices:\n %s\n\nContinue?" % ' '.join(bu.devices))):
        return
    # install FreeBSD in ZFS root
    devices_args = ' '.join('-d %s' % x for x in bu.devices)
    system_pool_name = env.instance.config.get('bootstrap-system-pool-name', 'system')
    data_pool_name = env.instance.config.get('bootstrap-data-pool-name', 'tank')
    swap_arg = ''
    # default swap: twice the physical memory (realmem presumably in MiB -- TODO confirm)
    swap_size = env.instance.config.get('bootstrap-swap-size', '%iM' % (realmem * 2))
    if swap_size:
        swap_arg = '-s %s' % swap_size
    system_pool_arg = ''
    system_pool_size = env.instance.config.get('bootstrap-system-pool-size', '20G')
    if system_pool_size:
        system_pool_arg = '-z %s' % system_pool_size
    run('destroygeom {devices_args} -p {system_pool_name} -p {data_pool_name}'.format(
        devices_args=devices_args,
        system_pool_name=system_pool_name,
        data_pool_name=data_pool_name))
    run('{zfsinstall} {devices_args} -p {system_pool_name} -V 28 -u {bsd_url} {swap_arg} {system_pool_arg}'.format(
        zfsinstall=bu.zfsinstall,
        devices_args=devices_args,
        system_pool_name=system_pool_name,
        bsd_url=bu.bsd_url,
        swap_arg=swap_arg,
        system_pool_arg=system_pool_arg))
    # create partitions for data pool, but only if the system pool doesn't use
    # the whole disk anyway
    if system_pool_arg:
        for device in bu.devices:
            run('gpart add -t freebsd-zfs -l {data_pool_name}_{device} {device}'.format(
                data_pool_name=data_pool_name,
                device=device))
    # mount devfs inside the new system
    if 'devfs on /rw/dev' not in bu.mounts:
        run('mount -t devfs devfs /mnt/dev')
    # setup bare essentials
    run('cp /etc/resolv.conf /mnt/etc/resolv.conf')
    bu.create_bootstrap_directories()
    bu.upload_bootstrap_files(template_context)
    # we need to install python here, because there is no way to install it via
    # ansible playbooks
    bu.install_pkg('/mnt', chroot=True, packages=['python27'])
    # set autoboot delay
    autoboot_delay = env.instance.config.get('bootstrap-autoboot-delay', '-1')
    run('echo autoboot_delay=%s >> /mnt/boot/loader.conf' % autoboot_delay)
    bu.generate_remote_ssh_keys()
    # reboot
    if value_asbool(env.instance.config.get('bootstrap-reboot', 'true')):
        with settings(hide('warnings'), warn_only=True):
            run('reboot')
def test_value_asbool(value, expected):
    """Each parametrized value must convert to the expected boolean."""
    from ploy.config import value_asbool
    converted = value_asbool(value)
    assert converted == expected
def _bootstrap():
    """Install FreeBSD onto the host currently booted into mfsbsd.

    Sequence: sanity checks (install URL, disks, interfaces, rc.conf
    contents), interactive confirmation, zfsinstall onto the selected
    devices, upload of the bootstrap files, installation of the bootstrap
    packages and - when configured - a final reboot.  All tunables come
    from ``env.instance.config``.
    """
    bu = BootstrapUtils()
    bu.generate_ssh_keys()
    bu.print_bootstrap_files()
    # gather infos
    if not bu.bsd_url:
        print("Found no FreeBSD system to install, please use 'special edition' or specify bootstrap-bsd-url and make sure mfsbsd is running")
        return
    # get realmem here, because it may fail and we don't want that to happen
    # in the middle of the bootstrap
    realmem = bu.realmem
    print("\nFound the following disk devices on the system:\n %s" % ' '.join(bu.sysctl_devices))
    if bu.first_interface:
        print("\nFound the following network interfaces, now is your chance to update your rc.conf accordingly!\n %s" % ' '.join(bu.phys_interfaces))
    else:
        print("\nWARNING! Found no suitable network interface!")
    template_context = {"ploy_jail_host_pkg_repository": "pkg+http://pkg.freeBSD.org/${ABI}/quarterly"}
    # first the config, so we don't get something essential overwritten
    template_context.update(env.instance.config)
    template_context.update(
        devices=bu.sysctl_devices,
        interfaces=bu.phys_interfaces,
        hostname=env.instance.id)
    rc_conf = bu.bootstrap_files['rc.conf'].read(template_context)
    if not rc_conf.endswith('\n'):
        print("\nERROR! Your rc.conf doesn't end in a newline:\n==========\n%s<<<<<<<<<<\n" % rc_conf)
        return
    rc_conf_lines = rc_conf.split('\n')
    # ask before continuing when rc.conf doesn't configure an expected interface
    for interface in [bu.first_interface, env.instance.config.get('ansible-dhcp_host_sshd_interface')]:
        if interface is None:
            continue
        ifconfig = 'ifconfig_%s' % interface
        for line in rc_conf_lines:
            if line.strip().startswith(ifconfig):
                break
        else:
            if not yesno("\nDidn't find an '%s' setting in rc.conf. You sure that you want to continue?" % ifconfig):
                return
    yes = env.instance.config.get('bootstrap-yes', False)
    if not (yes or yesno("\nContinuing will destroy the existing data on the following devices:\n %s\n\nContinue?"
                         % ' '.join(bu.devices))):
        return
    # install FreeBSD in ZFS root
    devices_args = ' '.join('-d %s' % x for x in bu.devices)
    system_pool_name = env.instance.config.get('bootstrap-system-pool-name', 'system')
    data_pool_name = env.instance.config.get('bootstrap-data-pool-name', 'tank')
    swap_arg = ''
    # default swap: twice the physical memory (realmem presumably in MiB -- TODO confirm)
    swap_size = env.instance.config.get('bootstrap-swap-size', '%iM' % (realmem * 2))
    if swap_size:
        swap_arg = '-s %s' % swap_size
    system_pool_arg = ''
    system_pool_size = env.instance.config.get('bootstrap-system-pool-size', '20G')
    if system_pool_size:
        system_pool_arg = '-z %s' % system_pool_size
    run('destroygeom {devices_args} -p {system_pool_name} -p {data_pool_name}'.format(
        devices_args=devices_args, system_pool_name=system_pool_name, data_pool_name=data_pool_name))
    run('{env_vars}{zfsinstall} {devices_args} -p {system_pool_name} -V 28 -u {bsd_url} {swap_arg} {system_pool_arg}'.format(
        env_vars=bu.env_vars, zfsinstall=bu.zfsinstall, devices_args=devices_args,
        system_pool_name=system_pool_name, bsd_url=bu.bsd_url, swap_arg=swap_arg,
        system_pool_arg=system_pool_arg), shell=False)
    # create partitions for data pool, but only if the system pool doesn't use
    # the whole disk anyway
    if system_pool_arg:
        for device in bu.devices:
            run('gpart add -t freebsd-zfs -l {data_pool_name}_{device} {device}'.format(
                data_pool_name=data_pool_name, device=device))
    # mount devfs inside the new system
    if 'devfs on /rw/dev' not in bu.mounts:
        run('mount -t devfs devfs /mnt/dev')
    # setup bare essentials
    run('cp /etc/resolv.conf /mnt/etc/resolv.conf', warn_only=True)
    bu.create_bootstrap_directories()
    bu.upload_bootstrap_files(template_context)
    bootstrap_packages = ['python27']
    if value_asbool(env.instance.config.get('firstboot-update', 'false')):
        bootstrap_packages.append('firstboot-freebsd-update')
        run('''touch /mnt/firstboot''')
        run('''sysrc -f /mnt/etc/rc.conf firstboot_freebsd_update_enable=YES''')
    # we need to install python here, because there is no way to install it via
    # ansible playbooks
    bu.install_pkg('/mnt', chroot=True, packages=bootstrap_packages)
    # set autoboot delay
    autoboot_delay = env.instance.config.get('bootstrap-autoboot-delay', '-1')
    run('echo autoboot_delay=%s >> /mnt/boot/loader.conf' % autoboot_delay)
    bu.generate_remote_ssh_keys()
    # reboot
    if value_asbool(env.instance.config.get('bootstrap-reboot', 'true')):
        with settings(hide('warnings'), warn_only=True):
            run('reboot')