def enable_spi():
    """Enable the SPI bus on a Raspberry Pi via /boot/config.txt.

    Returns Changed (with a reboot hint) when the boot configuration had
    to be edited, Changed when only the kernel module needed enabling,
    or Unchanged when SPI was already fully set up.
    """
    # FIXME: instead of rebooting, add a reboot_required property to
    #        Changed/Unchanged and allow chaining these.
    #
    #        e.g.
    #
    #        c = some_operation()
    #        c = c.chain(another_operation, msg=....)
    #
    #        return c
    #
    #        at any point in time, c.reboot_required can be resolved, either
    #        in a plan/operation or by the driver ("reboot_if_needed()")

    # FIXME: check for /etc/modprobe.d/raspi-blacklist.conf
    #        mentioned at https://www.raspberrypi.org/documentation/
    #        hardware/raspberrypi/spi/README.md
    with fs.edit('/boot/config.txt', create=False) as boot:
        boot.insert_line('dtparam=spi=on')
        boot.insert_line('dtoverlay=spi-bcm2835-overlay')

    if boot.changed:
        # config.txt is only read at boot time, so the module is merely
        # registered (load=False); a reboot is required either way.
        linux.enable_module('spi_bcm2835', load=False)
        return Changed('Enabled SPI, need to reboot now')

    if linux.enable_module('spi_bcm2835').changed:
        return Changed('SPI kernel module enabled')

    return Unchanged('SPI already enabled')
def install_git(self, host, repo, user='******', branch='master', egg=None,
                upgrade=True, editable=False, protocol='git'):
    """Install a package into the virtualenv straight from a git repo.

    Builds a pip-compatible VCS url for the given host/repo/branch and
    protocol, optionally tags it with an ``#egg=`` fragment, then
    delegates the actual installation to :meth:`install`.
    """
    # FIXME: with newer git versions, we'd find a way here to pass in
    #        the deployment key, which would allow not having to store
    #        the key on the server
    if protocol == 'git':
        url = 'git+ssh://{}@{}/{}@{}'.format(user, host, repo, branch)
    elif protocol in ('http', 'https'):
        url = 'git+{}://{}/{}@{}'.format(protocol, host, repo, branch)
    else:
        raise ConfigurationError('Unknown protocol: {}'.format(protocol))

    if egg is not None:
        url += '#egg=' + egg

    # FIXME: determine changes?
    self.install([url], upgrade=upgrade, editable=editable)
    return Changed(msg='Installed {}'.format(url))
def ensure_certificate(hostname):
    """Ensure *some* SSL certificate exists on the remote for ``hostname``.

    If the certificate, chain or key are missing at the configured
    remote paths, a self-signed certificate is generated and uploaded so
    webservers like nginx can start. Returns Changed when a self-signed
    certificate was installed, Unchanged otherwise.
    """
    cert_rpath = remote.path.join(config['sslcert_cert_dir'],
                                  hostname + '.crt')
    chain_rpath = remote.path.join(config['sslcert_cert_dir'],
                                   hostname + '.chain.crt')
    key_rpath = remote.path.join(config['sslcert_key_dir'],
                                 hostname + '.pem')

    # first, ensure any certificate exists on the host. otherwise,
    # webservers like nginx will likely not start
    if not (remote.lstat(cert_rpath) and remote.lstat(key_rpath) and
            remote.lstat(chain_rpath)):
        log.debug('Remote certificate {}, key {}, chain {} not found'.format(
            cert_rpath, key_rpath, chain_rpath))
        key, cert = generate_self_signed_cert(hostname)

        # FIXME: maybe use install cert here.
        fs.upload_string(key, key_rpath)
        fs.upload_string(cert, cert_rpath)
        # the self-signed cert acts as its own chain
        fs.upload_string(cert, chain_rpath)

        # BUGFIX: message typo "certficate" corrected
        return Changed(
            msg='No certificate {} / key {} / chain {} found. A self-signed '
                'certificate from a reputable snake-oil vendor was installed.'.
            format(cert_rpath, key_rpath, chain_rpath))

    # BUGFIX: message typo "preset" -> "present"
    return Unchanged(
        'Certificate for hostname {} already present'.format(hostname))
def create_role(self, name, password=None, superuser=False, createdb=False,
                createrole=False, inherit=True, login=True,
                connection_limit=-1):
    """Create a PostgreSQL role unless one with that name already exists.

    Returns Unchanged when the role is present, Changed after creating
    it. Flag arguments toggle the corresponding role attributes.
    """
    # FIXME: should update role if required
    with self.session() as sess:
        if any(role.rolname == name for role in sess.query(sc.Role)):
            return Unchanged(msg='Role {} already exists'.format(name))

        # assemble CREATE ROLE; each boolean selects the positive or the
        # negated keyword form
        clauses = [
            'CREATE ROLE ' + pg_valid(name),
            'SUPERUSER' if superuser else 'NOSUPERUSER',
            'CREATEDB' if createdb else 'NOCREATEDB',
            'CREATEROLE' if createrole else 'NOCREATEROLE',
            'INHERIT' if inherit else 'NOINHERIT',
            'LOGIN' if login else 'NOLOGIN',
            'CONNECTION LIMIT :connection_limit',
            'PASSWORD :pw' if password is not None else '',
        ]
        sql = text(' '.join(clauses))

        sess.connection().execute(sql,
                                  name=name,
                                  connection_limit=connection_limit,
                                  pw=password)
        return Changed(msg='Created role {}'.format(name))
def create(self, python='python3', global_site_packages=False):
    """Create the remote virtualenv if it does not exist yet.

    Returns Unchanged when the venv's python binary is already present,
    Changed after initializing a fresh virtualenv.
    """
    # FIXME: check if python version and global_site_packages are correct,
    #        and correct / recreat (--clear?) otherwise
    if remote.stat(self.python):
        return Unchanged(msg='virtualenv at {} already initialized'.format(
            self.remote_path))

    site_flag = ('--system-site-packages'
                 if global_site_packages else '--no-site-packages')
    cmd = [config['cmd_venv'], '-p', python, site_flag, self.remote_path]
    proc.run(cmd)

    return Changed(
        msg='Initialized virtualenv in {}'.format(self.remote_path))
def install_docker_compose():
    """Install the docker-compose binary and make it executable."""
    # docker: install docker compose
    dest = '/usr/local/bin/docker-compose'
    changed = fs.upload_file(docker.webfiles['docker-compose-Linux-x86_64'],
                             dest).changed
    changed |= fs.chmod(dest, mode=0o755).changed

    if changed:
        return Changed(msg='Installed docker-compose')
    return Unchanged(msg='docker-compose already installed')
def install_requirements(self, requirements_txt, upgrade=False):
    """Install packages from a requirements file into this virtualenv."""
    # FIXME: collect changes, via freeze?
    cmd = ([self.pip, 'install', '-r', requirements_txt]
           + (['-U'] if upgrade else []))
    proc.run(cmd)
    return Changed(msg='Installed requirements from {} into {}'.format(
        requirements_txt, self.remote_path))
def expand_root_fs():
    """Expand the Raspberry Pi root partition to fill the SD card.

    Compares the whole-device size against the sum of the two partitions
    and, if enough unallocated space remains, runs raspi-config's
    --expand-rootfs. Returns Changed/Unchanged accordingly.
    """
    # proc.run returns a 3-tuple; the first element carries fdisk's
    # output (the partition size)
    dev_size, _, _ = proc.run(['fdisk', '-s', '/dev/mmcblk0'])
    p1_size, _, _ = proc.run(['fdisk', '-s', '/dev/mmcblk0p1'])
    p2_size, _, _ = proc.run(['fdisk', '-s', '/dev/mmcblk0p2'])

    # NOTE(review): fdisk -s reports sizes in 1024-byte blocks, so the
    # * 512 multiplier looks like it may undercount free space by half
    # -- TODO confirm against the target fdisk version.
    free_space = (int(dev_size) - int(p1_size) - int(p2_size)) * 512

    if free_space <= 4 * 1024 * 1024:
        return Unchanged(
            msg='Free space is <= 4M. Not expanding root filesystem')
    else:
        # FIXME: run fdisk and resize2fs instead of raspi-config?
        proc.run(['raspi-config', '--expand-rootfs'])
        return Changed(msg='Expanded root filesystem')
def install(self, pkgs, upgrade=True, reinstall=False, editable=False):
    """Install the given packages into this virtualenv via pip."""
    # FIXME: collect changes
    flags = []
    if editable:
        flags.append('-e')
    if upgrade:
        flags.append('-U')
    if reinstall:
        flags.append('-I')

    proc.run([self.pip, 'install'] + flags + list(pkgs))

    return Changed(msg='Installed packages {} into virtualenv {}'.format(
        pkgs, self.remote_path))
def install_strict_ssh(allow_users=('root',), allow_groups=None,
                       address_family="any", permit_root=True,
                       modern_ciphers=True, sftp_enabled=True,
                       agent_forwarding=False, x11=False, tcp_forwarding=True,
                       unix_forwarding=True, tunnel=False, port=22,
                       use_dns=False, print_motd=False, auto_restart=True,
                       check_sshd_config=True, password_enabled=None):
    """Render and install a locked-down /etc/ssh/sshd_config.

    The config is rendered from the ssh preset's ``sshd_config``
    template. When the uploaded file differs from the remote one, the
    new config is optionally syntax-checked with ``sshd -t`` and the
    ssh service restarted.

    :param allow_users: Users allowed to log in. BUGFIX: default is now
                        an immutable tuple instead of a shared mutable
                        list (classic mutable-default pitfall).
    :param port: A single port or a list of ports.
    :param check_sshd_config: Validate the new config with ``sshd -t``.
    :param auto_restart: Restart ``ssh.service`` after a config change.
    """
    # FIXME: change default in jinja templates to strict reporting of missing
    #        values to avoid creating broken ssh configs
    # FIXME: add (possibly generic) support for atomic-tested-configuration
    #        swaps (i.e. run sshd -t on a config)
    # NOTE(review): use_dns is accepted but never forwarded to the
    # template -- confirm whether the template expects it.
    tpl = ssh_preset.templates.render(
        'sshd_config',
        allow_users=allow_users,
        allow_groups=allow_groups,
        address_family=address_family,
        permit_root=permit_root,
        modern_ciphers=modern_ciphers,
        sftp_enabled=sftp_enabled,
        agent_forwarding=agent_forwarding,
        x11=x11,
        tcp_forwarding=tcp_forwarding,
        unix_forwarding=unix_forwarding,
        tunnel=tunnel,
        ports=port if isinstance(port, list) else [port],
        print_motd=print_motd,
        password_enabled=password_enabled)

    if fs.upload_string(tpl, '/etc/ssh/sshd_config').changed:
        if check_sshd_config:
            # let sshd validate the new config; a broken sshd_config
            # could lock us out of the machine
            proc.run(['sshd', '-t'])

        # FIXME: we may want to abstract the init-system here
        if auto_restart:
            systemd.restart_unit('ssh.service')

        return Changed(msg='Changed sshd configuration')
    return Unchanged(msg='sshd config already strict')
def setup_rsyslog(server_addr):
    """Configure rsyslog to forward logs to a papertrail server.

    Installs TLS support for rsyslog, uploads the papertrail CA bundle
    and a forwarding config; restarts rsyslog when anything changed.

    :param server_addr: Address of the papertrail log destination.
    """
    # setup papertrail
    # FIXME: this is part of remand now
    # BUGFIX: dropped a dead "changed = False" store that was
    # immediately overwritten by the next assignment
    changed = apt.install_packages(['rsyslog-gnutls']).changed
    changed |= fs.upload_file(papertrail.files['papertrail-bundle.pem'],
                              '/etc/papertrail-bundle.pem').changed
    changed |= fs.upload_string(
        papertrail.templates.render('papertrail.conf', addr=server_addr),
        '/etc/rsyslog.d/papertrail.conf',
    ).changed

    if changed:
        systemd.restart_unit('rsyslog.service')
        return Changed(
            msg='Setup papertrail logging to {}'.format(server_addr))
    return Unchanged(msg='Papertrail already setup to {}'.format(server_addr))
def install_docker(arch=None):
    """Install docker-engine from the official apt repository.

    Sets up the https transport, repository key and repo entry, removes
    legacy docker packages and installs docker-engine.
    """
    # docker: needs repo, https transport and key
    results = [
        apt.install_packages(['apt-transport-https', 'ca-certificates']),
        apt.add_apt_keys(docker.files['docker-repo-key.asc']),
        apt.add_repo('debian-jessie',
                     site='https://apt.dockerproject.org/repo',
                     arch=arch,
                     name='docker'),
        # remove possibly installed old version
        apt.remove_packages(['docker.io', 'lxc-docker'], purge=True),
        # docker: install packages
        apt.install_packages(['docker-engine']),
    ]

    if any(result.changed for result in results):
        return Changed(msg='Installed docker')
    return Unchanged(msg='docker already installed')
def drop_database(self, name):
    """Drop the named database if it exists.

    :param name: Database name; must be alphanumeric.
    :raises ValueError: If ``name`` is not alphanumeric (BUGFIX: was an
                        ``assert``, which is stripped under ``-O``).
    """
    if not name.isalnum():
        raise ValueError('Invalid database name: {}'.format(name))

    # check if database exists
    qry = 'SELECT datname FROM pg_database'
    dbs = [row[0] for row in self.engine.execute(qry)]

    if name not in dbs:
        return Unchanged(
            'Database {} already dropped/nonexistant'.format(name))

    sql = text('DROP DATABASE ' + pg_valid(name))

    # DROP DATABASE cannot run inside a transaction block, hence the
    # autocommit session and the explicit COMMIT
    with self.session(autocommit=True) as sess:
        con = sess.connection()
        con.execute('COMMIT')
        con.execute(sql)

    return Changed(msg='Dropped database {}'.format(name))
def enable_auto_upgrades(boot_time='10min', unit_active_time='1d',
                         start=True):
    """Set up periodic apt upgrades driven by a systemd timer.

    Installs and enables an autoupdate timer/service pair; optionally
    starts the timer right away.
    """
    timer_tpl = debian.templates.render('autoupdate.timer',
                                        boot_time=boot_time,
                                        unit_active_time=unit_active_time)

    results = [
        # install both timer and service
        systemd.install_unit_string('autoupdate.timer', timer_tpl),
        systemd.install_unit_file(debian.files['autoupdate.service']),
        # enable both timer and service
        systemd.enable_unit('autoupdate.timer'),
        systemd.enable_unit('autoupdate.service'),
    ]

    # start timer
    if start:
        results.append(systemd.start_unit('autoupdate.timer'))

    if any(result.changed for result in results):
        return Changed(msg='Enabled automatic apt-updates via systemd timer')
    return Unchanged(
        msg='Automatic apt-updates via systemd timer already enabled')
def enable_letsencrypt(auto_reload=True, remove_default=True):
    """Enable Let's Encrypt ACME http-01 challenge support in nginx.

    Installs and links the acme-challenge site, prepares the
    ``.well-known`` challenge directory and optionally removes nginx's
    default site, reloading nginx when the configuration changed.
    """
    changed = any_changed(
        fs.upload_file(nginx.files['acme-challenge'],
                       '/etc/nginx/sites-available/acme-challenge'),
        fs.symlink('/etc/nginx/sites-available/acme-challenge',
                   '/etc/nginx/sites-enabled/00_acme-challenge'),
    )

    # challenge directories; their results are intentionally not folded
    # into `changed`, matching the original behavior
    for rpath in ('/var/www/html/.well-known',
                  '/var/www/html/.well-known/acme-challenge'):
        fs.create_dir(rpath)
        fs.chmod(rpath, mode=0o755)

    if remove_default:
        changed |= fs.remove_file('/etc/nginx/sites-enabled/default').changed

    if changed:
        if auto_reload:
            systemd.reload_unit('nginx.service', only_if_running=True)
        return Changed(msg='Enabled nginx Let\'s encrypt support')
    return Unchanged(msg='nginx Let\'s encrypt support already enabled')
def create_database(self, name, owner):
    """Create a database owned by *owner* unless it already exists.

    :param name: Database name; must be alphanumeric.
    :param owner: Owning role; must be alphanumeric.
    :raises ValueError: If ``name`` or ``owner`` are not alphanumeric
                        (BUGFIX: was an ``assert``, which is stripped
                        under ``-O``).
    """
    if not name.isalnum():
        raise ValueError('Invalid database name: {}'.format(name))
    if not owner.isalnum():
        raise ValueError('Invalid owner name: {}'.format(owner))

    # check if database exists
    qry = 'SELECT datname FROM pg_database'
    dbs = [row[0] for row in self.engine.execute(qry)]

    if name in dbs:
        return Unchanged('Database {} already exists'.format(name))

    sql = text(' '.join([
        'CREATE DATABASE ' + pg_valid(name),
        'WITH OWNER ' + pg_valid(owner)
    ]))

    # runs outside transaction: CREATE DATABASE cannot be executed
    # inside a transaction block
    with self.session(autocommit=True) as sess:
        con = sess.connection()
        con.execute('COMMIT')
        con.execute(sql)

    return Changed(msg='Created database {}'.format(name))
def restart(service):
    """Restart a SysV-style service via its init script."""
    init_script = remote.path.join(config['sysv_initd'], service)
    proc.run([init_script, 'restart'])
    return Changed(msg='Restarted {}'.format(service))
def install_cert(cert, key, cert_name=None, key_name=None):
    """Install an SSL certificate and its private key on the remote.

    Certificate filenames are unchanged by default; certificates go to
    the configured ``sslcert_cert_dir`` (per default ``/etc/ssl``), keys
    to ``sslcert_key_dir`` (per default ``/etc/ssl/private``).

    :param cert: Local path of the certificate (PEM).
    :param key: Local path of the private key (PEM).
    :param cert_name: Remote filename for the cert (default: basename).
    :param key_name: Remote filename for the key (default: basename).
    :raises ValueError: If cert/key appear swapped or invalid.
    :raises ConfigurationError: If the remote cert/key directories are
                                missing or insecurely configured.
    """
    cert_name = cert_name or os.path.basename(cert)
    key_name = key_name or os.path.basename(key)

    # small sanity check
    with open(cert) as f:
        if 'PRIVATE' in f.read():
            raise ValueError(
                'You seem to have passed a private key as a cert!')

    with open(key) as f:
        if 'PRIVATE' not in f.read():
            raise ValueError(
                '{} does not seem to be a valid private key'.format(key))

    # check if remote is reasonably secure
    cert_dir = config['sslcert_cert_dir']
    cert_dir_st = remote.lstat(cert_dir)
    if not cert_dir_st:
        raise ConfigurationError(
            'Remote SSL dir {} does not exist'.format(cert_dir))

    key_dir = config['sslcert_key_dir']
    key_dir_st = remote.lstat(key_dir)
    if not key_dir_st:
        raise ConfigurationError(
            'Remote key dir {} does not exist'.format(key_dir))

    SECURE_MODES = (0o700, 0o710)
    actual_mode = key_dir_st.st_mode & 0o777
    if actual_mode not in SECURE_MODES:
        # BUGFIX: '{:o}'.format(SECURE_MODES) raised TypeError (octal
        # format spec applied to a tuple) right when the error should
        # have been reported; format each mode individually instead
        raise ConfigurationError(
            'Mode of remote key dir {} is {:o}, should be one of {}'.format(
                key_dir, actual_mode,
                ', '.join('{:o}'.format(mode) for mode in SECURE_MODES)))

    if key_dir_st.st_uid != 0:
        # BUGFIX: message typo "Remove key dir" -> "Remote key dir"
        raise ConfigurationError(
            'Remote key dir {} is not owned by root'.format(key_dir))

    # we can safely upload the key and cert
    cert_rpath = remote.path.join(cert_dir, cert_name)
    key_rpath = remote.path.join(key_dir, key_name)

    changed = False
    changed |= fs.upload_file(cert, cert_rpath).changed
    changed |= fs.upload_file(key, key_rpath).changed
    changed |= fs.chmod(key_rpath, 0o640).changed
    changed |= fs.chown(key_rpath, uid='root', gid='ssl-cert').changed

    if changed:
        return Changed(
            msg='Uploaded key pair {}/{}'.format(cert_name, key_name))
    return Unchanged(
        msg='Key pair {}/{} already uploaded'.format(cert_name, key_name))