def provision(common='master'):
    """Provision master with salt-master and salt-minion.

    Installs salt from the saltstack PPA if not already present, pre-seeds
    the master's own minion key so it accepts itself, makes sure git is
    available for gitfs, then pushes the salt config via update_salt_config().
    """
    require('environment')
    # Install salt minion
    with settings(warn_only=True):
        with hide('running', 'stdout', 'stderr'):
            # 'which' prints nothing on a miss, so an empty (falsy) result
            # means salt is not yet installed
            installed = run('which salt-call')
    if not installed:
        # resolve salt hostnames to localhost
        sudo('echo "127.0.0.1 salt master" >> /etc/hosts')
        sudo('echo "salt" >> /etc/hostname')
        sudo('hostname salt')
        # install salt-master and salt-minion on master
        sudo('apt-get update -q -y')
        sudo('apt-get install python-software-properties -q -y')
        sudo('add-apt-repository ppa:saltstack/salt -y')
        sudo('apt-get update -q')
        sudo('apt-get install salt-master salt-minion -q -y')
        # temporarily stop minion and master while keys are seeded
        with settings(warn_only=True):
            sudo('service salt-minion stop')
            sudo('service salt-master stop')
        # pre-seed the master's minion with accepted key
        with cd('/etc/salt/pki/minion/'):
            sudo('salt-key --gen-keys=minion')
        with cd('/etc/salt/pki/master/minions/'):
            sudo('cp /etc/salt/pki/minion/minion.pub /etc/salt/pki/master/minions/salt')
    # make sure git is installed for gitfs
    with settings(warn_only=True):
        with hide('running', 'stdout', 'stderr'):
            installed = run('which git')
    if not installed:
        sudo('apt-get install python-pip git-core -q -y')
        sudo('pip install -U GitPython')
    update_salt_config()
def setup_munin():
    """Install and configure munin monitoring served through spawn-fcgi.

    Pushes munin/spawn-fcgi config files, fixes ownership of the munin CGI
    artifacts, registers the fcgi wrappers with update-rc.d and (re)starts
    them along with munin-node.
    """
    # sudo('apt-get update')
    sudo('apt-get install -y munin munin-node munin-plugins-extra spawn-fcgi')
    put('config/munin.conf', '/etc/munin/munin.conf', use_sudo=True)
    put('config/spawn_fcgi_munin_graph.conf', '/etc/init.d/spawn_fcgi_munin_graph', use_sudo=True)
    put('config/spawn_fcgi_munin_html.conf', '/etc/init.d/spawn_fcgi_munin_html', use_sudo=True)
    sudo('chmod u+x /etc/init.d/spawn_fcgi_munin_graph')
    sudo('chmod u+x /etc/init.d/spawn_fcgi_munin_html')
    # chown may fail before the cgi files exist; warn_only tolerates that
    with settings(warn_only=True):
        sudo('chown nginx.www-data /var/log/munin/munin-cgi*')
        sudo('chown nginx.www-data /usr/lib/cgi-bin/munin-cgi*')
        sudo('chown nginx.www-data /usr/lib/munin/cgi/munin-cgi*')
    with settings(warn_only=True):
        sudo('/etc/init.d/spawn_fcgi_munin_graph stop')
        sudo('/etc/init.d/spawn_fcgi_munin_graph start')
        sudo('update-rc.d spawn_fcgi_munin_graph defaults')
        sudo('/etc/init.d/spawn_fcgi_munin_html stop')
        sudo('/etc/init.d/spawn_fcgi_munin_html start')
        sudo('update-rc.d spawn_fcgi_munin_html defaults')
        sudo('/etc/init.d/munin-node restart')
    # NOTE(review): the chown/start sequence below repeats the one above --
    # presumably a second pass after the services created their files on
    # first start; confirm before simplifying.
    with settings(warn_only=True):
        sudo('chown nginx.www-data /var/log/munin/munin-cgi*')
        sudo('chown nginx.www-data /usr/lib/cgi-bin/munin-cgi*')
        sudo('chown nginx.www-data /usr/lib/munin/cgi/munin-cgi*')
        sudo('chmod a+rw /var/log/munin/*')
    with settings(warn_only=True):
        sudo('/etc/init.d/spawn_fcgi_munin_graph start')
        sudo('/etc/init.d/spawn_fcgi_munin_html start')
def _galaxy_db_exists(env):
    """
    Check if galaxy database already exists. Return ``True`` if it does,
    ``False`` otherwise.

    Note that this method does a best-effort attempt at starting the DB
    server if one is not already running to do a thorough test. It shuts
    the server down upon completion, but only if it also started it.
    """
    db_exists = False
    started = False  # True when this function started the server itself
    c = _get_galaxy_db_configs(env)
    if exists(c['psql_data_dir']) and not _dir_is_empty(c['psql_data_dir']):
        # Make sure the postgres user owns its data dir before poking at it
        sudo("chown --recursive {0}:{0} {1}".format(c['psql_user'], c['psql_data_dir']))
        env.logger.debug("Galaxy database directory {0} already exists.".format(c['psql_data_dir']))
        # Check if PostgreSQL is already running and try to start the DB if not
        if not _postgres_running(env):
            with settings(warn_only=True):
                env.logger.debug("Trying to start DB server in {0}".format(c['psql_data_dir']))
                sudo("{0}".format(c['pg_start_cmd']), user=c['psql_user'])
                # NOTE(review): flagged as started even if the start command
                # failed (warn_only); the matching stop below is warn_only too,
                # so a failed start only costs a harmless stop attempt.
                started = True
        # Check if galaxy DB already exists. '|| true' keeps a grep miss from
        # failing the command; note the membership test uses the literal
        # string 'galaxy' rather than c['galaxy_db_name'].
        if 'galaxy' in sudo("{0} -P pager --list | grep {1} || true".format(c['psql_cmd'],
                            c['galaxy_db_name']), user=c['psql_user']):
            env.logger.warning("Galaxy database {0} already exists in {1}! Not creating it."
                               .format(c['galaxy_db_name'], c['psql_data_dir']))
            db_exists = True
        if started:
            # We started the server -- shut it back down
            with settings(warn_only=True):
                sudo("{0}".format(c['pg_stop_cmd']), user=c['psql_user'])
    return db_exists
def changeHadoopProperties(path, fileName, propertyDict):
    """Update Hadoop-style properties in ``fileName`` located under ``path``.

    Uploads the local replaceHadoopProperty.py helper to the remote host if
    its md5 differs, backs up (or moves, when CONFIGURATION_FILES_CLEAN) the
    existing config file, then runs the helper with key/value pairs from
    ``propertyDict``. No-op when ``fileName`` or ``propertyDict`` is falsy.
    """
    if not fileName or not propertyDict:
        return
    with cd(path):
        with settings(warn_only=True):
            import hashlib
            # Fix: close the local file promptly instead of leaking the
            # handle (the original open(...) was never closed).
            with open("replaceHadoopProperty.py", "rb") as helper_fp:
                replaceHadoopPropertyHash = hashlib.md5(helper_fp.read()).hexdigest()
            # Only re-upload the helper when the remote copy differs
            if run("test %s = `md5sum replaceHadoopProperty.py | cut -d ' ' -f 1`" % replaceHadoopPropertyHash).failed:
                put("replaceHadoopProperty.py", path + "/")
                run("chmod +x replaceHadoopProperty.py")
        with settings(warn_only=True):
            if not run("test -f %s" % fileName).failed:
                # Preserve the previous config as a numbered .bakN file
                op = "cp"
                if CONFIGURATION_FILES_CLEAN:
                    op = "mv"
                currentBakNumber = getLastBackupNumber(fileName) + 1
                run(
                    "%(op)s %(file)s %(file)s.bak%(bakNumber)d"
                    % {"op": op, "file": fileName, "bakNumber": currentBakNumber}
                )
        # Ensure the target exists before the helper edits it in place
        run("touch %s" % fileName)
        command = "./replaceHadoopProperty.py '%s' %s" % (
            fileName,
            " ".join(["%s %s" % (str(key), str(value)) for key, value in propertyDict.items()]),
        )
        run(command)
def _install_pkg_version(env, pkg, version, brew_cmd, ipkgs):
    """Install a specific version of a package by retrieving from git history.
    https://gist.github.com/gcatlin/1847248
    Handles both global packages and those installed via specific taps.
    """
    # Already at the requested version -- nothing to do
    if ipkgs["current"].get(pkg.split("/")[-1]) == version:
        return
    if version == "HEAD":
        env.safe_run("{brew_cmd} install --HEAD {pkg}".format(**locals()))
    else:
        # NOTE(review): this raise makes the git-history logic below
        # unreachable for any explicit version; only the "HEAD" path falls
        # through and runs it. Confirm whether the dead code should be
        # removed or the raise lifted.
        raise ValueError("Cannot currently handle installing brew packages by version.")
    with _git_pkg_version(env, brew_cmd, pkg, version):
        # Unlink any currently-installed copy first (tap-qualified names are
        # reduced to the bare package name for brew's local commands)
        if pkg.split("/")[-1] in ipkgs["current"]:
            with settings(warn_only=True):
                env.safe_run("{brew_cmd} unlink {pkg}".format(
                    brew_cmd=brew_cmd, pkg=pkg.split("/")[-1]))
        # if we have a more recent version, uninstall that first
        cur_version_parts = env.safe_run_output("{brew_cmd} list --versions {pkg}".format(
            brew_cmd=brew_cmd, pkg=pkg.split("/")[-1])).strip().split()
        if len(cur_version_parts) > 1 and LooseVersion(cur_version_parts[1]) > LooseVersion(version):
            with settings(warn_only=True):
                env.safe_run("{brew_cmd} uninstall {pkg}".format(**locals()))
        env.safe_run("{brew_cmd} install {pkg}".format(**locals()))
        with settings(warn_only=True):
            env.safe_run("{brew_cmd} switch {pkg} {version}".format(**locals()))
        env.safe_run("%s link --overwrite %s" % (brew_cmd, pkg))
def setup_venv():
    """Create the ~/.virtualenv/base virtualenv if it is not there yet.

    Probes for the activate script; when the probe fails, the env is built
    and sourced from .bashrc on every login.
    """
    with settings(warn_only=True):
        probe = run('touch .virtualenv/base/bin/activate')
        if probe.failed:
            with settings(warn_only=False):
                run('mkdir .virtualenv')
                run('virtualenv .virtualenv/base')
                run('echo "source .virtualenv/base/bin/activate" >> .bashrc')
def setup_python_base():
    """
    Install non-standard python libraries into the virtual env that is
    set up by setup_venv()
    """
    simple_packages = ['pymongo',
                       'bson',
                       'lockfile',
                       'pyparsing',
                       'codepy',
                       'PIL',
                       ]
    for pkg in simple_packages:
        # Install only when the import currently fails inside the venv
        with settings(warn_only=True):
            if baserun('python -c "import %s"' % pkg).failed:
                with settings(warn_only=False):
                    baserun('pip install --user %s' % pkg)
    # install ordereddict for genson
    # (stdlib collections.OrderedDict on 2.7+, backport package otherwise)
    odict_test1 = 'python -c "import collections; collections.OrderedDict"'
    odict_test2 = 'python -c "import ordereddict"'
    with settings(warn_only=True):
        if baserun(odict_test1).failed and baserun(odict_test2).failed:
            with settings(warn_only=False):
                baserun('pip install --user -vUI ordereddict')
def nginx_configs():
    """Push nginx config templates and the ReadTheDocs perl lib to all hosts.

    The per-host work was triplicated verbatim; it is now a single helper
    driven by (host_string, template host name) pairs, in the same order.
    """
    for host_string, host in (
            ('[email protected]', 'Asgard'),
            ('root@newasgard', 'Asgard'),
            ('root@newChimera', 'Chimera')):
        _push_nginx_configs(host_string, host)


def _push_nginx_configs(host_string, host):
    """Upload app/lb/main nginx configs and the perl module to one host."""
    with settings(host_string=host_string):
        context = {'host': host}
        upload_template('../nginx/app.nginx.conf',
                        '/etc/nginx/sites-enabled/readthedocs',
                        context=context, backup=False)
        upload_template('../nginx/lb.nginx.conf',
                        '/etc/nginx/sites-enabled/lb',
                        context=context, backup=False)
        upload_template('../nginx/main.nginx.conf',
                        '/etc/nginx/nginx.conf',
                        context=context, backup=False)
        # Perl config
        sudo('mkdir -p /usr/share/nginx/perl/')
        put('../salt/nginx/perl/lib/ReadTheDocs.pm',
            '/usr/share/nginx/perl/ReadTheDocs.pm')
def clean():
    """Stop the nginx/app services and remove every deployed artifact.

    Covers both the production and staging project: virtualenvs, project
    and log directories, control scripts, nginx sites and init files.
    """
    with settings(warn_only=True):
        sudo('service nginx stop')
        result = sudo('service %s stop' % PROJECT_NAME)
        if result.failed:
            warn("%s was not running." % PROJECT_NAME)
    with settings(warn_only=True):
        result = sudo('service %s stop' % PROJECT_NAME_STAGING)
        if result.failed:
            warn("%s was not running." % PROJECT_NAME_STAGING)
    for name in (PROJECT_NAME, PROJECT_NAME_STAGING):
        sudo('rmvirtualenv %s' % name)
    with settings(warn_only=True):
        for dir in (PROJECT_DIR, PROJECT_DIR_STAGING,
                    PROJECT_LOGDIR, PROJECT_LOGDIR_STAGING):
            sudo('rm -rf %s' % dir)
        # Same removal recipe for production and staging, in that order
        for name, script in ((PROJECT_NAME, PROJECT_SCRIPT_NAME),
                             (PROJECT_NAME_STAGING, PROJECT_SCRIPT_NAME_STAGING)):
            sudo('rm /home/%s/%s' % (PROJECT_USER, script))
            sudo('rm /etc/nginx/sites-enabled/%s' % name)
            sudo('rm /etc/nginx/sites-available/%s' % name)
            sudo('rm /etc/init/%s.conf' % name)
            sudo('rm /etc/init.d/%s' % name)
def deploy(branch=None):
    """Deploy to a given environment.

    Fetches the configured branch, stops the supervisor group first when
    requirements or migration files changed, hard-resets the checkout, then
    re-installs requirements / runs syncdb as needed before restarting.
    """
    require('environment')
    if branch is not None:
        env.branch = branch
    requirements = False
    migrations = False
    # Fetch latest changes
    with cd(env.code_root):
        with settings(user=env.project_user):
            run('git fetch origin')
        # Look for new requirements or migrations
        requirements = match_changes(env.branch, "'requirements\/'")
        migrations = match_changes(env.branch, "'\/migrations\/'")
        # Stop the whole supervisor group before changing code that the
        # running processes depend on
        if requirements or migrations:
            supervisor_command('stop %(environment)s:*' % env)
        with settings(user=env.project_user):
            run("git reset --hard origin/%(branch)s" % env)
    upload_local_settings()
    if requirements:
        update_requirements()
        # New requirements might need new tables/migrations
        syncdb()
    elif migrations:
        syncdb()
    collectstatic()
    supervisor_command('restart %(environment)s:*' % env)
def sync(): """Rysnc local states and pillar data to the master.""" # Check for missing local secrets so that they don't get deleted # project.rsync_project fails if host is not set with settings(host=env.master, host_string=env.master): if not have_secrets(): get_secrets() else: # Check for differences in the secrets files for environment in ['staging', 'production']: remote_file = os.path.join('/srv/pillar/', environment, 'secrets.sls') with lcd(os.path.join(CONF_ROOT, 'pillar', environment)): if files.exists(remote_file): get(remote_file, 'secrets.sls.remote') else: local('touch secrets.sls.remote') with settings(warn_only=True): result = local('diff -u secrets.sls.remote secrets.sls') if result.failed and not confirm(red("Above changes will be made to secrets.sls. Continue?")): abort("Aborted. File have been copied to secrets.sls.remote. " + "Resolve conflicts, then retry.") else: local("rm secrets.sls.remote") salt_root = CONF_ROOT if CONF_ROOT.endswith('/') else CONF_ROOT + '/' project.rsync_project(local_dir=salt_root, remote_dir='/tmp/salt', delete=True) sudo('rm -rf /srv/salt /srv/pillar') sudo('mv /tmp/salt/* /srv/') sudo('rm -rf /tmp/salt/')
def setup_user():
    """
    Create a new Linux user, set it up for certificate login.
    Call `setup_passwords`.
    """
    require('hosts', provided_by=[webserver])
    require('adminuser')
    # Remember the target account; all setup below runs as the admin user
    env.new_user = env.user
    with settings(user=env.adminuser, pty=True):
        # create user and add it to admin group
        sudo('adduser "%(new_user)s" --disabled-password --gecos "" && adduser "%(new_user)s" %(sudoers_group)s' % env)
        # copy authorized_keys from root for certificate login
        sudo('mkdir %(homepath)s/.ssh && cp /root/.ssh/authorized_keys %(homepath)s/.ssh/' % env)
        # Now we should be able to login with that new user
        with settings(warn_only=True):
            # create web and temp dirs
            sudo('mkdir -p %(prj_path)s; chown %(new_user)s:%(new_user)s %(prj_path)s;' % env)
            sudo('mkdir -p %(tmppath)s; chown %(new_user)s:%(new_user)s %(tmppath)s;' % env)
        # symlink web dir in home
        run('cd ~; ln -s %(prj_path)s www;' % env)
    # Switch back to the (new) target user for subsequent tasks
    env.user = env.new_user
    # cd to web dir and activate virtualenv on login
    run('echo "\ncd %(prj_path)s && source bin/activate\n" >> %(homepath)s/.profile\n' % env, pty=True)
    setup_passwords()
def install_site():
    "Add the virtualhost config file to the webserver's config, activate logrotate"
    require('release', provided_by=[deploy, setup])
    with cd('%(prj_path)s/releases/%(release)s' % env):
        with settings(user=env.adminuser, pty=True):
            # Webserver vhost config for this project
            run('cp server-setup/%(webserver)s.conf /etc/%(webserver)s/sites-available/%(prj_name)s' % env)
            if env.use_daemontools:
                # activate new service runner
                run('cp server-setup/service-run.sh /etc/service/%(prj_name)s/run; chmod a+x /etc/service/%(prj_name)s/run;' % env)
            else:
                # delete old service dir
                run('echo; if [ -d /etc/service/%(prj_name)s ]; then rm -rf /etc/service/%(prj_name)s; fi' % env)
            if env.use_supervisor:
                # activate new supervisor.conf
                run('cp server-setup/supervisor.conf /etc/supervisor/conf.d/%(prj_name)s.conf' % env)
                if env.use_celery:
                    run('cp server-setup/supervisor-celery.conf /etc/supervisor/conf.d/%(prj_name)s-celery.conf' % env)
            else:
                # delete old config file
                # if you set a process name in supervisor.ini, then you must add it like %(prj_name):appserver
                # NOTE(review): the 'supervisorctl %(prj_name)s stop rm ...'
                # commands below look malformed (supervisorctl expects
                # 'stop <name>' and there is no separator before 'rm');
                # confirm against a working deployment before relying on them.
                run('echo; if [ -f /etc/supervisor/%(prj_name)s.ini ]; then supervisorctl %(prj_name)s stop rm /etc/supervisor/%(prj_name)s.ini; fi' % env)
                run('echo; if [ -f /etc/supervisor/conf.d/%(prj_name)s.conf ]; then supervisorctl %(prj_name)s stop rm /etc/supervisor/conf.d/%(prj_name)s.conf; fi' % env)
                if env.use_celery:
                    run('echo; if [ -f /etc/supervisor/%(prj_name)s-celery.ini ]; then supervisorctl celery celerybeat stop rm /etc/supervisor/%(prj_name)s-celery.ini; fi' % env)
                    run('echo; if [ -f /etc/supervisor/conf.d/%(prj_name)s-celery.conf ]; then supervisorctl celery celerybeat stop rm /etc/supervisor/conf.d/%(prj_name)s-celery.conf; fi' % env)
            if env.use_celery and env.use_daemontools:
                run('cp server-setup/service-run-celeryd.sh /etc/service/%(prj_name)s-celery/run; chmod a+x /etc/service/%(prj_name)s-celery/run;' % env)
            # try logrotate
            with settings(warn_only=True):
                run('cp server-setup/logrotate.conf /etc/logrotate.d/website-%(prj_name)s' % env)
                if env.use_celery:
                    run('cp server-setup/logrotate-celery.conf /etc/logrotate.d/celery' % env)
                run('cp server-setup/letsencrypt.conf /etc/letsencrypt/configs/%(cryptdomain)s.conf' % env)
            # Enable the vhost (link may already exist; warn_only tolerates it)
            with settings(warn_only=True):
                run('cd /etc/%(webserver)s/sites-enabled/; ln -s ../sites-available/%(prj_name)s %(prj_name)s' % env)
def testResultAttributes( self ): failing_command = 'cat /etc/shadow' # insufficient permissions succeeding_command = 'uname -a' erroneous_command = 'this-command-does-not-exist -a' # A successful command should have the appropriate status # attributes set result = cuisine.run(succeeding_command) self.assertTrue(result.succeeded) self.assertFalse(result.failed) self.assertEqual(result.return_code, 0) # With warn_only set, we should be able to examine the result # even if it fails with settings(warn_only=True): # command should fail with output to stderr result = cuisine.run(failing_command, combine_stderr=False) self.assertTrue(result.failed) self.assertFalse(result.succeeded) self.assertEqual(result.return_code, 1) self.assertIsNotNone(result.stderr) self.assertIn('Permission denied', result.stderr) # With warn_only off, failure should cause execution to abort with settings(warn_only=False): with self.assertRaises(SystemExit): cuisine.run(failing_command) # An erroneoneous command should fail similarly to fabric with settings(warn_only=True): result = cuisine.run(erroneous_command) self.assertTrue(result.failed) self.assertEqual(result.return_code, 127)
def upgrade(exclude="", verbose=False, upgrade_method=apt.upgrade,
            pre_method=None, post_method=None, unattended=False):
    """Upgrade apt packages across all hosts, with optional hooks.

    ``exclude`` is a ';'-separated list of package names to skip.
    ``pre_method``/``post_method`` are optional tasks executed before/after
    the upgrade. With ``unattended`` the confirmation prompt is skipped.
    """
    # one hour command timeout, one minute connect)
    with settings(command_timeout=3600, timeout=60):
        exclude = exclude.split(";")
        execute(apt.autoremove)
        execute(apt.update)
        packages = execute(apt.verify)
        apt.filter_packages(packages, exclude)
        # check if there are packages available for upgrade
        # (unique package names across every host's result dict)
        count_packages = len(list(set(chain(*[p.keys() for p in packages.values()]))))
        if count_packages == 0:
            print("No packages to upgrade")
            return
        if verbose:
            apt.print_changes_perhost(packages)
        else:
            apt.print_changes(packages)
        if not unattended:
            # Allow the prompt to be answered interactively even if fabric
            # is otherwise configured to abort on prompts
            with settings(abort_on_prompts=False):
                do_it = prompt("Do you want to continue?", default="y")
                if do_it not in ("y", "Y"):
                    return
        if pre_method is not None:
            execute(pre_method)
        execute(upgrade_method, packages=packages)
        if post_method is not None:
            execute(post_method)
def rsync_mountpoints(src_inst, src_vol, src_mnt, dst_inst, dst_mnt,
                      dst_vol, dst_mnt, encr=False):
    """Run `rsync` against mountpoints, copy disk label.

    :param src_inst: source instance;
    :param src_vol: source volume with label that will be copied to
                    dst_vol;
    :param src_mnt: root or directory hierarchy to replicate;
    :param dst_inst: destination instance;
    :param dst_vol: destination volume, that will be marked with label
                    from src_vol;
    :param dst_mnt: destination point where source hierarchy to place;
    :param encr: True if volume is encrypted;
    :type encr: bool."""
    src_key_filename = config.get(src_inst.region.name, 'KEY_FILENAME')
    dst_key_filename = config.get(dst_inst.region.name, 'KEY_FILENAME')
    with config_temp_ssh(dst_inst.connection) as key_file:
        with settings(host_string=dst_inst.public_dns_name,
                      key_filename=dst_key_filename):
            # Temporarily authorize the throwaway key on the destination;
            # the original authorized_keys is restored at the end.
            wait_for_sudo('cp /root/.ssh/authorized_keys '
                          '/root/.ssh/authorized_keys.bak')
            pub_key = local('ssh-keygen -y -f {0}'.format(key_file), True)
            append('/root/.ssh/authorized_keys', pub_key, use_sudo=True)
            if encr:
                # Encrypted volumes are streamed raw over netcat instead of
                # rsynced file-by-file; start the listener in a screen.
                sudo('screen -d -m sh -c "nc -l 60000 | gzip -dfc | '
                     'sudo dd of={0} bs=16M"'
                     .format(get_vol_dev(dst_vol)), pty=False)
                # dirty magick
                dst_ip = sudo(
                    'curl http://169.254.169.254/latest/meta-data/public-ipv4')
        with settings(host_string=src_inst.public_dns_name,
                      key_filename=src_key_filename):
            put(key_file, '.ssh/', mirror_local_mode=True)
            # NOTE: rebinds dst_key_filename to the key's basename for the
            # remote-side ssh invocation below (shadows the config value).
            dst_key_filename = os.path.split(key_file)[1]
            if encr:
                sudo('(dd if={0} bs=16M | gzip -cf --fast | nc -v {1} 60000)'
                     .format(get_vol_dev(src_vol), dst_ip))
            else:
                cmd = (
                    'rsync -e "ssh -i .ssh/{key_file} -o '
                    'StrictHostKeyChecking=no" -cahHAX --delete --inplace '
                    '--exclude /root/.bash_history '
                    '--exclude /home/*/.bash_history '
                    '--exclude /etc/ssh/moduli --exclude /etc/ssh/ssh_host_* '
                    '--exclude /etc/udev/rules.d/*persistent-net.rules '
                    '--exclude /var/lib/ec2/* --exclude=/mnt/* '
                    '--exclude=/proc/* --exclude=/tmp/* '
                    '{src_mnt}/ root@{rhost}:{dst_mnt}')
                wait_for_sudo(cmd.format(
                    rhost=dst_inst.public_dns_name, dst_mnt=dst_mnt,
                    key_file=dst_key_filename, src_mnt=src_mnt))
            # Read the filesystem label so it can be replicated on dst
            label = sudo('e2label {0}'.format(get_vol_dev(src_vol)))
        with settings(host_string=dst_inst.public_dns_name,
                      key_filename=dst_key_filename):
            if not encr:
                sudo('e2label {0} {1}'.format(get_vol_dev(dst_vol), label))
            # Revoke the temporary key and flush caches to disk
            wait_for_sudo('mv /root/.ssh/authorized_keys.bak '
                          '/root/.ssh/authorized_keys')
            run('sync', shell=False)
            run('for i in {1..20}; do sync; sleep 1; done &')
def upload_file(host, user, local_fn, remote_fn, port=22,
                rsync_options='-azvr --progress', run_local=False):
    """Copy ``local_fn`` to ``remote_fn`` with rsync, creating the target dir.

    With ``run_local`` the copy happens on the local machine; otherwise the
    fabric env is pointed at ``user@host:port`` and the target directory is
    created remotely before rsyncing over ssh. Returns the list of command
    results.
    """
    results = []
    if run_local:
        if not os.path.exists(os.path.dirname(remote_fn)):
            results.append(local('mkdir -p %s' % os.path.dirname(remote_fn)))
        # Fix: honor the rsync_options parameter (this branch previously
        # hard-coded '-avr --progress' and ignored it).
        results.append(local('rsync %(rsync_options)s %(local_fn)s %(remote_fn)s' % locals()))
        return results
    env.host_string = host
    env.user = user
    env.port = port
    remote_dir = os.path.dirname(remote_fn)
    # Create the remote target directory only when it is missing
    with settings(warn_only=True):
        result = run('ls %s' % remote_dir)
    if result.failed:
        run('mkdir -p %s' % remote_dir)
    rsync_template = \
        'rsync %(rsync_options)s %(local_fn)s %(user)s@%(host)s:%(remote_fn)s'
    with settings(warn_only=True):
        results.append(local(rsync_template % locals()))
    return results
def users(self, min_uid=None, max_uid=None, use_sudo=True):
    """
    Return a dict of all users::

        {'user1': userstruct(user1),
         'user2': userstruct(user2)}

    Users with a uid outside the optional ``min_uid``/``max_uid`` bounds
    are omitted. Raises PlatformError when either the user listing or a
    per-user group lookup fails.
    """
    with settings(hide('everything'), warn_only=True):
        content = self.execute(self.users_cmd, use_sudo=use_sudo)
        if content.failed:
            raise PlatformError(content)
    users = {}
    for line in content.splitlines():
        # passwd-style record: name:passwd:uid:gid:comment:home:shell
        name, _, uid, gid, comment, home, shell = line.strip().split(':')
        uid = int(uid)
        # Skip over users outside specified min/max uid.
        if min_uid is not None and uid < min_uid:
            continue
        if max_uid is not None and uid > max_uid:
            continue
        # Resolve the user's primary group and supplementary groups
        with settings(hide('everything'), warn_only=True):
            content = self.execute(self.userget_groups_cmd % name, use_sudo=use_sudo)
            if content.failed:
                raise PlatformError(content)
        # First token is the primary group; the rest are supplementary
        content = content.strip().split(' ')
        group, groups = content[0], set(content[1:])
        user = userstruct(name, uid, int(gid), group, groups, comment, home, shell)
        users[name] = user
    return users
def setup_capsules(path):
    """Reads the configuration, create capsules and start content sync
    on them.

    For each capsule: certificates are generated on the server, fetched
    locally, pushed to the capsule host, and the capsule is registered and
    installed with the server's oauth credentials.
    """
    load_capsule_config(path)
    config = env.capsule_config
    server = config.server.host_string
    # Let Fabric know how to log into the hosts
    env.passwords = config.passwords
    env.key_filename = config.key_filenames
    # The oauth information is needed for every capsule register. Cache this
    # information.
    with settings(host_string=server):
        oauth_info = get_oauth_info()
    # Register each capsule on the server
    for capsule in config.capsules:
        with settings(host_string=server):
            # Certs are minted on the server, then staged locally so they
            # can be pushed to the capsule below
            cert_path = generate_capsule_certs(capsule.hostname)
            get(remote_path=cert_path, local_path=cert_path)
        with settings(host_string=capsule.host_string):
            register_capsule()
            put(local_path=cert_path)
            capsule_installer(capsule.hostname, cert_path, *oauth_info)
def ismounted(device):
    """
    Check if partition is mounted

    Example::

        from fabtools.disk import ismounted

        if ismounted('/dev/sda1'):
            print ("disk sda1 is mounted")
    """
    # Look for the device in the first column of the regular mount table,
    # then in the swap table.
    for probe in ('mount', 'swapon -s'):
        with settings(hide('running', 'stdout')):
            output = run_as_root(probe)
        for entry in output.splitlines():
            columns = entry.split()
            if columns[0] == device:
                return True
    return False
def upgrade():
    """Upgrade all Etherpad servers to the version configured in puppet.

    This will:

    1. Ask for a password with which to sudo. All servers must have the same
       sudo passowrd
    2. Perform a git pull on the puppet node to get the latest configuration
    3. On each etherpad host:
        a. Forcefully stop puppet on the etherpad hosts and cancel any runs
           that are currently happening
        b. Stop the etherpad services
        c. Back up the etherpad dir to /tmp/etherpad_backup
        d. Run puppet, which should notice the missing directory and
           repopulate it with the configured (updated) version of the package
    """
    cluster_util.ensure_sudo_pass()
    # Pull the updated puppet data
    with settings(hosts=[cluster_hosts.puppet()]):
        execute(puppet.git_update)
    # Run all etherpad upgrades in parallel
    with settings(hosts=cluster_hosts.etherpad(), parallel=True):
        execute(upgrade_host_internal)
def start_clients(): if not env.host_string: return curr_host = env.host_string.split("@")[1] try: serv_dict = clients[env.l2g_map[curr_host]] except: return for serv in serv_dict: for prot in serv_dict[serv]: if prot == "tcp": for port in serv_dict[serv][prot]: command = "/home/ubuntu/traffic/client_server.py -c -n 1 -t -p " + port + " -i " + serv command = "nohup " + command + " >& /dev/null < /dev/null &" print "running %s on %s/%s" % (command, env.host_string, env.l2g_map[curr_host]) with settings(warn_only=True, timeout=2): try: sudo(command, pty=False) except: print "FAILED..." elif prot == "udp": for port in serv_dict[serv][prot]: command = "/home/ubuntu/traffic/client_server.py -c -n 1 -u -p " + port + " -i " + serv command = "nohup " + command + " >& /dev/null < /dev/null &" print "running %s on %s/%s" % (command, env.host_string, env.l2g_map[curr_host]) with settings(warn_only=True, timeout=2): try: sudo(command, pty=False) except: print "FAILED..." else: print "Nothing to be done..." return
def ensure_ssh_key_added(key_files):
    """Make sure every key in ``key_files`` is loaded in an ssh agent.

    Returns True when all keys were already present in the running agent.
    Otherwise spawns a fresh ssh-agent, adds the missing keys, exports the
    agent's PID/socket into os.environ for later ssh calls, and returns
    False.
    """
    # Normalize to absolute paths so agent listings compare correctly
    need_adding = set(os.path.abspath(os.path.expanduser(p)) for p in key_files)
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=True):
        # First check already added keys
        res = local("ssh-add -l", capture=True)
        if res.succeeded:
            for line in res.splitlines():
                m = SSH_KEY_LIST_RE.match(line)
                if not m:
                    continue
                path = os.path.abspath(os.path.expanduser(m.group('key_file')))
                need_adding.discard(path)
    with settings(hide('warnings', 'running', 'stdout', 'stderr')):
        # Next add missing keys
        if need_adding:
            key_string = ' '.join(need_adding)
            # Start a dedicated agent and capture its PID and auth socket
            # from the eval output (line 0: env export, line 1: socket path)
            start_ssh_agent = ("eval `ssh-agent` && echo $SSH_AUTH_SOCK && "
                               "ssh-add %s") % key_string
            info_agent = local(start_ssh_agent, capture=True).splitlines()
            os.environ["SSH_AGENT_PID"] = info_agent[0].split()[-1]
            os.environ["SSH_AUTH_SOCK"] = info_agent[1]
            return False
        else:
            return True
def _create_account(username, region, instance_ids, passwordless, sudo):
    """Create a user account on the remote host and install their SSH key.

    Expects ``<username>.pub`` to exist locally; aborts otherwise. With
    ``passwordless`` the account is created without a password and (with
    ``sudo``) granted NOPASSWD sudo; otherwise a normal account is created
    and optionally added to the admin groups.

    NOTE(review): ``region`` and ``instance_ids`` are unused in this body --
    presumably kept for signature compatibility with callers; confirm.
    """
    if not _isfile(username + '.pub'):
        abort("%s.pub does not exist" % username)
    env.ssh_key = username + '.pub'
    env.username = username  # Own attribute for string formatting.
    if passwordless:
        _sudo('adduser --disabled-password %(username)s' % env)
        if sudo:
            # Uncomment the NOPASSWD sudo group line in /etc/sudoers
            # (string intentionally NOT %-formatted; '%sudo' is literal)
            _sudo('sed -i "s/# %sudo ALL=NOPASSWD: ALL/'
                  '%sudo ALL=NOPASSWD: ALL/" /etc/sudoers')
            for group in ['sudo']:
                with settings(group=group):
                    _sudo('adduser %(username)s %(group)s' % env)
    else:
        _sudo('adduser %(username)s' % env)
        if sudo:
            for group in ['adm', 'admin', 'staff']:
                with settings(group=group):
                    _sudo('adduser %(username)s %(group)s' % env)
    # Install the public key with the required ownership and permissions
    _sudo('mkdir -p /home/%(username)s/.ssh' % env)
    _sudo('touch /home/%(username)s/.ssh/authorized_keys' % env)
    _sudo('chown -R %(username)s: /home/%(username)s/.ssh' % env)
    _sudo('chmod 700 /home/%(username)s/.ssh' % env)
    put(env.ssh_key, '/home/%(username)s/.ssh/authorized_keys' % env,
        use_sudo=True)
    _sudo('chown -R %(username)s: /home/%(username)s/.'
          'ssh/authorized_keys' % env)
    _sudo('chmod 600 /home/%(username)s/.ssh/authorized_keys' % env)
def forceQuitUnveillance(target=None):
    """Kill -9 any running <target>.py process found via ps/grep.

    Defaults to the frontend. Shell wrappers, the grep itself and the
    shutdown.py helper are filtered out of the ps listing before the pid
    is extracted and killed.
    """
    if target is None:
        target = "unveillance_frontend"
    with settings(warn_only=True):
        kill_list = local("ps -ef | grep %s.py" % target, capture=True)
    for k in [k.strip() for k in kill_list.splitlines()]:
        print k
        # Skip the /bin/sh or /bin/bash wrapper line
        if re.match(r".*\d{1,2}:\d{2}[:|\.]\d{2}\s+/bin/(?:ba)?sh", k) is not None:
            continue
        # Skip the grep process itself
        if re.match(r".*\d{1,2}:\d{2}[:|\.]\d{2}\s+grep", k) is not None:
            continue
        # Skip the shutdown helper
        if re.match(r".*\d{1,2}:\d{2}[:|\.]\d{2}\s+.*[Pp]ython\sshutdown\.py", k) is not None:
            continue
        # Pull the PID (second column) out of the matching ps line
        pid = re.findall(re.compile("(?:\d{3,4}|[a-zA-Z0-9_\-\+]{1,8})\s+(\d{2,6})\s+\d{1,6}.*%s\.py" % target), k)
        print pid
        if len(pid) == 1 and len(pid[0]) >= 1:
            try:
                pid = int(pid[0])
            except Exception as e:
                print "ERROR: %s" % e
                continue
            with settings(warn_only=True):
                local("kill -9 %d" % pid)
def get_config(package_name, save_as=None):
    """Extract a frontend config for ``package_name`` from a docker container.

    Reads a `docker inspect` JSON document on stdin, sshes into the
    container to dump the annex config, rewrites container-internal ports
    to their host-mapped equivalents, and writes the result to
    ``configs/<image>.<hostname>.json``.

    NOTE(review): ``save_as`` is unused in this body -- confirm intent.
    """
    try:
        config = json.loads(sys.stdin.read())[0]
    except Exception as e:
        print e
        return
    # ssh connection parameters derived from the container metadata
    c_map = {
        'p' : int(config['HostConfig']['PortBindings']['22/tcp'][0]['HostPort']),
        'o' : "PubkeyAuthentication=no",
        'u' : config['Config']['User'],
        'h' : str(config['Config']['WorkingDir']),
        'a' : package_name
    }
    package_config = None
    cmd = "ssh -f -o %(o)s -p %(p)d %(u)s@localhost 'source ~/.bash_profile && cd %(h)s/%(a)s/lib/Annex && python unveillance_annex.py -config'" % (c_map)
    with settings(hide('everything'), warn_only=True):
        sentinel_found = False
        # Everything before the sentinel line is noise; the config proper is
        # the first {...} JSON line that follows it.
        for line in local(cmd, capture=True).splitlines():
            if re.match(r'THE FOLLOWING LINES MAKE FOR YOUR FRONTEND CONFIG', line):
                sentinel_found = True
                continue
            if not sentinel_found:
                continue
            try:
                if line[0] == '{' and line[-1] == '}':
                    package_config = line
                    break
            except Exception as e:
                # e.g. empty line -- keep scanning
                continue
    if package_config is not None:
        package_config = json.loads(package_config)
        s = package_config['server_port']
        m = package_config['server_message_port']
        # Rewrite container-internal ports to their host-mapped equivalents
        package_config.update({
            'annex_remote_port' : c_map['p'],
            'server_port' : int(config['HostConfig']['PortBindings']['%d/tcp' % s][0]['HostPort']),
            'server_message_port' : int(config['HostConfig']['PortBindings']['%d/tcp' % m][0]['HostPort']),
            'server_force_ssh' : True,
            'server_user' : c_map['u']
        })
        # Output name: <image-with-:-replaced>.<container hostname>
        i = "%s.%s" % (config['Config']['Image'].replace(":", "-"), config['Config']['Hostname'])
        if not os.path.exists("configs"):
            with settings(hide('everything'), warn_only=True):
                local("mkdir configs")
        with open("configs/%s.json" % i, 'wb+') as C:
            C.write(json.dumps(package_config))
        print i
def _git_stash(env, brew_cmd):
    """Perform a safe git stash around an update.

    This circumvents brews internal stash approach which doesn't work on
    older versions of git and is sensitive to missing config.emails.
    """
    # Generator used as a context manager (presumably wrapped with
    # @contextmanager at the definition site -- decorator not visible here).
    brew_prefix = env.safe_run_output("{brew_cmd} --prefix".format(**locals()))
    with cd(brew_prefix):
        with quiet():
            with settings(warn_only=True):
                # Older git refuses to stash without a configured email
                env.safe_run("git config user.email '*****@*****.**'")
                # Non-zero return code means there are local changes to stash
                check_diff = env.safe_run("git diff --quiet")
    git_version = env.safe_run_output("git --version").strip().split()[-1]
    if git_version and LooseVersion(git_version) < LooseVersion("1.7"):
        if check_diff.return_code > 0:
            with cd(brew_prefix):
                with settings(warn_only=True):
                    env.safe_run("git stash --quiet")
        try:
            yield None
        finally:
            # Restore the stashed local changes after the wrapped update
            if check_diff.return_code > 0:
                with cd(brew_prefix):
                    with settings(warn_only=True):
                        env.safe_run("git stash pop --quiet")
    else:
        # Recent git: brew's own handling is fine, no stash needed
        yield None
def install():
    """Install termcast locally: build deps, write wrapper scripts,
    symlink them into /usr/bin and configure openbsd-inetd."""
    user = getpass.getuser()
    installDir = os.getcwd()
    for command in COMMANDS:
        fab.local(command)
    casts = fab.prompt(colors.cyan('Specify directory where you want the '
                                   'casts to be stored'),
                       default=os.path.join(installDir, 'casts'))
    # Values substituted into the script/config templates below
    values = {'user': user,
              'installDir': installDir,
              'termcastPath': os.path.join(installDir, 'termcast'),
              'termcastPlayPath': os.path.join(installDir, 'termcast-play'),
              'casts': casts}
    # Remove stale copies first; warn_only tolerates their absence
    with fab.settings(warn_only=True):
        fab.local('rm {}'.format(values['termcastPath']))
        fab.local('rm {}'.format(values['termcastPlayPath']))
    write_file(values['termcastPath'], termcastTemplate.format(**values))
    write_file(values['termcastPlayPath'], termcastPlayTemplate.format(**values))
    linkCommand = 'sudo ln -s {} /usr/bin'
    for script in SCRIPTS:
        # Link may already exist; warn_only tolerates that
        with fab.settings(warn_only=True):
            fab.local(linkCommand.format(os.path.join(installDir, script)))
    inetdConfText = inetdConfTemplate.format(**values)
    write_sudo_file('/etc/inetd.conf', inetdConfText)
    fab.local('sudo /etc/init.d/openbsd-inetd restart')
    fab.local('chmod a+x {}'.format(values['termcastPath']))
    fab.local('chmod a+x {}'.format(values['termcastPlayPath']))
def setup_repo():
    """Clone the NewsBlur repo and expose the code dirs under /srv."""
    with settings(warn_only=True):
        run('git clone https://github.com/samuelclay/NewsBlur.git ~/newsblur')
    sudo('mkdir -p /srv')
    with settings(warn_only=True):
        for target in ('code', 'newsblur'):
            sudo('ln -f -s /home/%s/%s /srv/' % (env.user, target))
def provision():
    """Poll until puppet is installed, then apply the local manifests."""
    print(magenta('Starting Provisioning'))
    message = 'Waiting for puppet to become available'
    with hide('everything'):
        with settings(warn_only=True):
            while 1:
                # Redraw the waiting message in place, growing a dot per poll
                sys.stdout.write("\r" + magenta(message) + " ")
                sys.stdout.flush()
                # we don't have a puppet master here
                # so we need to poll
                if run("which puppet").succeeded:
                    sys.stdout.write("\n")
                    sys.stdout.flush()
                    break
                message = message + white('.')
                time.sleep(2)
    # this AMI does not let you log in as root.
    # we need to be sure the agent-forwarding is active
    # when we provision, so we pass -E on top of the default
    # fabric sudo prefix. The default rackspace images
    # allow you to ssh as root
    sudo_prefix = "sudo -S -E -p '%(sudo_prompt)s' " % env
    with settings(sudo_prefix=sudo_prefix):
        sudo("puppet apply --modulepath '/home/ubuntu/configuration/modules' /home/ubuntu/configuration/site.pp")
def shell_django():
    """Open a Django shell inside the project virtualenv (verbose, with tracebacks)."""
    activate = '. %(virtualenv_activate)s;' % env
    with cd('%(basedir)s' % env), settings(command=activate):
        local('%(command)s python manage.py shell --verbosity 3 --traceback' % env,
              capture=False)
def clean_sphinx():
    """Remove generated Sphinx documentation via `setup.py clean_sphinx`."""
    activate = '. %(virtualenv_activate)s;' % env
    with cd('%(basedir)s' % env), settings(command=activate):
        local('%(command)s python setup.py clean_sphinx' % env, capture=False)
def build():
    """Run `setup.py build` inside the project virtualenv."""
    activate = '. %(virtualenv_activate)s;' % env
    with cd('%(basedir)s' % env), settings(command=activate):
        local('%(command)s python setup.py build' % env, capture=False)
def tx_push():
    """Push source and translation files to Transifex non-interactively."""
    activate = '. %(virtualenv_activate)s;' % env
    with cd('%(basedir)s' % env), settings(command=activate):
        local('%(command)s tx push -s -t --skip --no-interactive' % env,
              capture=False)
def tx_pull():
    """Pull all translations from Transifex, skipping errors."""
    activate = '. %(virtualenv_activate)s;' % env
    with cd('%(basedir)s' % env), settings(command=activate):
        local('%(command)s tx pull -a --skip' % env, capture=False)
def init_catalog():
    """Initialize a new translation catalog via `setup.py init_catalog`."""
    activate = '. %(virtualenv_activate)s;' % env
    with cd('%(basedir)s' % env), settings(command=activate):
        local('%(command)s python setup.py init_catalog' % env, capture=False)
def extract_messages():
    """Extract translatable strings via `setup.py extract_messages`."""
    activate = '. %(virtualenv_activate)s;' % env
    with cd('%(basedir)s' % env), settings(command=activate):
        local('%(command)s python setup.py extract_messages' % env, capture=False)
def runcelery_daemon():
    """Start a single-concurrency celeryd worker (INFO logging) in the virtualenv."""
    activate = '. %(virtualenv_activate)s;' % env
    with cd('%(basedir)s' % env), settings(command=activate):
        local('%(command)s python manage.py celeryd -c 1 -l INFO' % env,
              capture=False)
def create_virtualenv():
    """Create the project virtualenv using the configured args and target dir."""
    cmd = 'virtualenv %(virtualenv_args)s %(virtualenv_dir)s' % env
    with cd('%(basedir)s' % env), settings(command=cmd):
        local('%(command)s' % env, capture=False)
def runcelery_worker():
    """Run the celery beat scheduler inside the project virtualenv.

    NOTE(review): despite the name, this launches `celery beat` (the periodic
    task scheduler), not a worker -- confirm against callers before renaming.
    """
    activate = '. %(virtualenv_activate)s;' % env
    with cd('%(basedir)s' % env), settings(command=activate):
        local('%(command)s python manage.py celery beat -s celerybeat-schedule ' % env,
              capture=False)
def deconfigure_sudo():
    """Remove the tribus sudoers drop-in via a root shell."""
    # `% env` is a no-op here (no format placeholders) but kept as-is.
    cmd = 'sudo /bin/bash -c "rm -rf /etc/sudoers.d/tribus"' % env
    with settings(command=cmd):
        local('%(command)s' % env, capture=False)
def drop_mongo():
    """Drop the entire `tribus` MongoDB database."""
    # `% env` is a no-op here (no format placeholders) but kept as-is.
    cmd = "mongo tribus --eval 'db.dropDatabase()'" % env
    with settings(command=cmd):
        local('%(command)s' % env, capture=False)
def rebuild_index():
    """Rebuild the search index without prompting (verbose, with tracebacks)."""
    activate = '. %(virtualenv_activate)s;' % env
    with cd('%(basedir)s' % env), settings(command=activate):
        local('%(command)s python manage.py rebuild_index --noinput --verbosity 3 --traceback' % env,
              capture=False)
def preseed_packages():
    """Load the workenv debconf preseed answers (requires root via sudo)."""
    cmd = 'sudo /bin/bash -c "debconf-set-selections %s"' % f_workenv_preseed
    with settings(command=cmd):
        local('%(command)s' % env, capture=False)
def stop():
    """Hard-kill any running `healer` processes; tolerate none being present."""
    with settings(warn_only=True):
        run('killall -KILL healer')
def configure_sudo():
    """Grant the current user passwordless sudo via a tribus sudoers drop-in.

    Runs `su root`, so it prompts for the root password, then writes and
    locks down (0440) /etc/sudoers.d/tribus.
    """
    cmd = ('su root -c "echo \'%(user)s ALL= NOPASSWD: ALL\' > '
           '/etc/sudoers.d/tribus; chmod 0440 /etc/sudoers.d/tribus"' % env)
    with settings(command=cmd):
        local('%(command)s' % env, capture=False)
def _promptup():
    """Tag the current blade version and push to both upstream mirrors."""
    # warn_only: pushes/tags may fail (e.g. nothing new) without aborting.
    with settings(warn_only=True):
        local('hg tag "%s"' % getversion('blade/__init__.py'))
        local('hg push ssh://[email protected]/lcrees/blade')
        local('hg push github')
def clean_tmp():
    """Wipe /tmp on the current host; always reports success (returns True)."""
    get_logger().debug("Cleaning tmp directories on host {}".format(env.host))
    with settings(warn_only=True):
        sudo('rm -rf /tmp/*')
    return True
def iter_keys_per_peer_md5_config(self):
    """Exercise BGP MD5 authentication key rotation.

    Sequence: clear auth everywhere -> apply one shared per-peer key ->
    rotate ten host-level keys -> restart contrail-control and recheck ->
    rotate again -> restart again -> rotate ten per-peer keys twice.
    After every change, assert TCP and BGP sessions stay up.

    NOTE(review): the 95s/120s sleeps presumably wait out BGP convergence
    after each key change -- empirically tuned; confirm before shortening.
    """
    # Start with authentication disabled on every node.
    auth_data = None
    for host in self.list_uuid:
        self.config_per_peer(auth_data=auth_data)
        self.config_md5(host=host, auth_data=auth_data)
    sleep(95)
    assert (self.check_bgp_status(
        self.is_mx_present)), "BGP between nodes should be up before md5"
    # Apply a single shared per-peer key and confirm BGP recovers.
    auth_data = {
        'key_items': [{
            'key': "iter",
            "key_id": 0
        }],
        "key_type": "md5"
    }
    self.config_per_peer(auth_data=auth_data)
    sleep(95)
    assert (self.check_bgp_status(self.is_mx_present)
            ), "BGP between nodes not up after per peer with mx"
    # Rotate through ten host-level keys (juniper1..juniper10); every host
    # gets the same key each round, so sessions must survive each change.
    for i in range(1, 11):
        for host in self.list_uuid:
            key = "juniper" + i.__str__()
            auth_data = {
                'key_items': [{
                    'key': key,
                    "key_id": 0
                }],
                "key_type": "md5"
            }
            self.config_md5(host=host, auth_data=auth_data)
        sleep(95)
        assert (self.check_tcp_status()
                ), "TCP connection should be up after key change"
        assert (
            self.check_bgp_status(self.is_mx_present)
        ), "BGP between nodes should be up 1 as keys are the same everywhere"
    # Restart contrail-control on the first config node; the cluster must
    # stabilize and BGP must come back with the configured keys.
    with settings(host_string='%s@%s' % (self.inputs.username,
                                         self.inputs.cfgm_ips[0]),
                  password=self.inputs.password,
                  warn_only=True,
                  abort_on_prompts=False,
                  debug=True):
        conrt = run('service contrail-control restart')
    cluster_status, error_nodes = ContrailStatusChecker(
    ).wait_till_contrail_cluster_stable()
    assert cluster_status, 'Hash of error nodes and services : %s' % (
        error_nodes)
    assert (
        self.check_bgp_status(self.is_mx_present)
    ), "BGP between nodes should be up 2 as keys are the same everywhere"
    # Second, identical host-level rotation round after the restart.
    for i in range(1, 11):
        for host in self.list_uuid:
            key = "juniper" + i.__str__()
            auth_data = {
                'key_items': [{
                    'key': key,
                    "key_id": 0
                }],
                "key_type": "md5"
            }
            self.config_md5(host=host, auth_data=auth_data)
        sleep(95)
        assert (self.check_tcp_status()
                ), "TCP connection should be up after key change"
        assert (
            self.check_bgp_status(self.is_mx_present)
        ), "BGP between nodes should be up 3 as keys are the same everywhere"
    # Second restart/stabilize/verify cycle.
    with settings(host_string='%s@%s' % (self.inputs.username,
                                         self.inputs.cfgm_ips[0]),
                  password=self.inputs.password,
                  warn_only=True,
                  abort_on_prompts=False,
                  debug=True):
        conrt = run('service contrail-control restart')
    cluster_status, error_nodes = ContrailStatusChecker(
    ).wait_till_contrail_cluster_stable()
    assert cluster_status, 'Hash of error nodes and services : %s' % (
        error_nodes)
    assert (
        self.check_bgp_status(self.is_mx_present)
    ), "BGP between nodes should be up 4 as keys are the same everywhere"
    # Rotate the same ten keys at per-peer scope.
    for i in range(1, 11):
        key = "juniper" + i.__str__()
        auth_data = {
            'key_items': [{
                'key': key,
                "key_id": 0
            }],
            "key_type": "md5"
        }
        self.config_per_peer(auth_data=auth_data)
        #with repetitive config/unconfig, tcp takes a little longer to come up.
        #does not seem contrail issue, still needs a debug. Increasing the timeout as a temp measure.
        sleep(120)
        assert (self.check_tcp_status()
                ), "TCP connection should be up after key change"
        assert (self.check_bgp_status(self.is_mx_present)
                ), "BGP between nodes not up after per peer match"
    # Final per-peer rotation round.
    for i in range(1, 11):
        key = "juniper" + i.__str__()
        auth_data = {
            'key_items': [{
                'key': key,
                "key_id": 0
            }],
            "key_type": "md5"
        }
        # NOTE(review): `notmx` is never read -- looks vestigial; confirm and remove.
        notmx = 1
        self.config_per_peer(auth_data=auth_data)
        sleep(95)
        assert (self.check_tcp_status()
                ), "TCP connection should be up after key change"
        assert (self.check_bgp_status(self.is_mx_present)
                ), "BGP between nodes not up after per peer match"
    return True
def check_host():
    "Check that needed tools are installed on hosts"
    # get type of current host
    htype = get_type_cached(env.host_string)
    # run checks
    if env.host_string in config.TPCONF_router:
        # Router hosts only need their traffic-shaping toolchain.
        if htype == 'FreeBSD':
            run('which ipfw')
        if htype == "Linux":
            run('which tc')
            run('which iptables')
        # XXX check that kernel tick rate is high (>= 1000)
    else:
        # End hosts: verify per-OS capture/checksum tools first.
        if htype == 'FreeBSD':
            run('which md5')
            run('which tcpdump')
        elif htype == 'Darwin':
            run('which md5')
            run('which tcpdump')
            run('which dsiftr-osx-teacup.d')
        elif htype == 'Linux':
            run('which ethtool')
            run('which md5sum')
            run('which tcpdump')
            #run('which web10g-listconns')
            #run('which web10g-readvars')
            #updated for ttprobe support
            # TPCONF_linux_tcp_logger is optional config; default to web10g.
            try:
                linux_tcp_logger = config.TPCONF_linux_tcp_logger
            except AttributeError:
                linux_tcp_logger = 'web10g'
            if linux_tcp_logger == 'ttprobe' or linux_tcp_logger == 'both':
                #checking the availability of ttprobe.ko kernel module
                run('ls /lib/modules/$(uname -r)/extra/ttprobe.ko')
            if linux_tcp_logger == 'web10g' or linux_tcp_logger == 'both':
                run('which web10g-logger')
        elif htype == 'CYGWIN':
            run('which WinDump', pty=False)
            run('which win-estats-logger', pty=False)
            # if we don't have proper ntp installed then
            # start time service if not started and force resync
            with settings(warn_only=True):
                ret = run('ls "/cygdrive/c/Program Files (x86)/NTP/bin/ntpq"')
                if ret.return_code != 0:
                    run('net start w32time', pty=False)
                    run('w32tm /resync', pty=False)
            # try to enable any test network interfaces that are (accidently)
            # disabled after reboot
            with settings(warn_only=True):
                interfaces = get_netint_cached(env.host_string, int_no=-1)
                for interface in interfaces:
                    run('netsh int set int "Local Area Connection %s" enabled' %
                        interface, pty=False)
        # Common process/compression tools required on every end host.
        run('which killall', pty=False)
        run('which pkill', pty=False)
        run('which ps', pty=False)
        run('which gzip', pty=False)
        run('which dd', pty=False)
        # check for traffic sender/receiver tools
        run('which iperf', pty=False)
        run('which ping', pty=False)
        run('which httperf', pty=False)
        run('which lighttpd', pty=False)
        run('which nttcp', pty=False)
        # Install this experiment's helper scripts and confirm they resolve.
        put(config.TPCONF_script_path + '/runbg_wrapper.sh', '/usr/bin')
        run('chmod a+x /usr/bin/runbg_wrapper.sh', pty=False)
        run('which runbg_wrapper.sh', pty=False)
        put(config.TPCONF_script_path + '/kill_iperf.sh', '/usr/bin')
        run('chmod a+x /usr/bin/kill_iperf.sh', pty=False)
        run('which kill_iperf.sh', pty=False)
        put(config.TPCONF_script_path + '/pktgen.sh', '/usr/bin')
        run('chmod a+x /usr/bin/pktgen.sh', pty=False)
        run('which pktgen.sh', pty=False)
def get_project_head(project_name=project_name, project_username=project_username):
    """Show the most recent commit of the remote project checkout."""
    repo_dir = '/home/%s/%s' % (project_username, project_name)
    with settings(user=project_username), cd(repo_dir):
        run('git log -n1')
def _set_cwag_test_networking(mac):
    """Route the traffic-server subnet via the CWAG test bridge and pin its ARP entry."""
    # Don't error if route already exists
    with settings(warn_only=True):
        sudo('ip route add %s/24 dev %s proto static scope link'
             % (TRF_SERVER_SUBNET, CWAG_TEST_BR_NAME))
        sudo('arp -s %s %s' % (TRF_SERVER_IP, mac))
def tail():
    """Follow downloader unit logs; Ctrl-C interrupts the remote journalctl."""
    env.remote_interrupt = True
    with settings(warn_only=True):
        run('journalctl --unit=downloader@* --follow --lines=0', pty=True)
def remote_command(cmd):
    """Kill any running run.sh on the host, pause briefly, then execute *cmd*."""
    with settings(warn_only=True):
        fabric.api.run('pkill -9 run.sh')
    # Give the killed process a moment to release its resources.
    time.sleep(1)
    fabric.api.run(cmd, pty=False)
def stop_mysql():
    """Stop the MySQL service; ignore failure when it is not running."""
    with settings(warn_only=True):
        run("sudo service mysql stop")
def setup_project_apache(project_name=project_name, project_username=project_username,
                         apache_server_name=apache_server_name,
                         apache_server_alias=apache_server_alias,
                         django_settings=None, server_admin=server_admin,
                         media_directory=media_directory, env_path=env_path,
                         branch=branch):
    """
    Configure apache-related settings for the project. This will render every
    *.apache2 file in the current local directory as a template with project_name,
    project_username, branch, server_name and server_alias as context. It'll put
    the rendered template in apache sites-available. It will also render any
    *.wsgi file with the same context. It will put the rendered file in the
    project user's home directory.

    media_directory should be relative to the project user's home directory.
    It defaults to project_username/media ie you'd end up with
    /home/project/project/media/
    """
    # Fall back to auto-detected settings module when none is given.
    django_settings = django_settings or _get_django_settings()
    with cd('/home/%s' % project_username):
        # permissions for media/ -- apache (www-data) must be able to write uploads
        sudo('chgrp www-data -R %s' % media_directory)
        sudo('chmod g+w %s' % media_directory)
    # Template context shared by both the apache vhost and wsgi renders.
    context = {
        'project_name': project_name,
        'project_username': project_username,
        'server_name': apache_server_name,
        'server_alias': apache_server_alias,
        'django_settings': django_settings,
        'env_path': env_path,
        'branch': branch,
        'server_admin': server_admin,
    }
    # apache config: render each local *.apache2 into sites-available,
    # but never overwrite a config that is already deployed.
    for config_path in local('find $PWD -name "*.apache2"').split('\n'):
        d, sep, config_filename = config_path.rpartition('/')
        dest_path = '/etc/apache2/sites-available/%s' % config_filename
        if not files.exists(dest_path, use_sudo=True):
            files.upload_template(config_path, dest_path, context=context,
                                  use_sudo=True)
            sudo('a2ensite %s' % config_filename)
    # wsgi file: rendered into the project user's home, same skip-if-present rule.
    for wsgi_path in local('find $PWD -name "*.wsgi"').split('\n'):
        d, sep, wsgi_filename = wsgi_path.rpartition('/')
        dest_path = '/home/%s/%s' % (project_username, wsgi_filename)
        if not files.exists(dest_path, use_sudo=True):
            files.upload_template(wsgi_path, dest_path, use_sudo=True,
                                  context=context)
            sudo('chown %s:%s %s' % (project_username, 'www-data', dest_path))
            sudo('chmod 755 %s' % dest_path)
    # Validate before reloading; a broken config is reported but left in place.
    with settings(warn_only=True):
        check_config = sudo('apache2ctl configtest')
    if check_config.failed:
        print(red('Invalid apache configuration! The requested configuration was installed, but there is a problem with it.'))
    else:
        louis.commands.apache_reload()
def test():
    """Run the nose suite locally; on failure ask whether to continue, else abort."""
    with settings(warn_only=True):
        outcome = local("nosetests -v", capture=True)
    if outcome.failed and not confirm("Tests failed. Continue?"):
        abort("Aborted at user request")
def _check_sudo():
    """Verify that sudo works on the host; bootstrap it as root when missing.

    Probes with a harmless `sudo pwd`. If that fails (e.g. minimal images
    shipping without sudo), falls back to installing sudo via apt, which
    only succeeds when the connection user is root.
    """
    with settings(warn_only=True):
        result = sudo('pwd')
    if result.failed:
        # Bug fix: was a Python 2-only `print "..."` statement -- a syntax
        # error under Python 3 and inconsistent with the parenthesized
        # print(...) calls used elsewhere in this fabfile.
        print("Trying to install sudo. Must be root")
        run('apt-get update && apt-get install -y sudo')
def exists(name):
    """
    Return True when a system user called *name* exists on the remote host.
    """
    # getent exits non-zero when the passwd entry is absent; hide the noise.
    with settings(hide('running', 'stdout', 'warnings'), warn_only=True):
        return run('getent passwd %s' % name).succeeded
def post_backup_tasks():
    """Bring MySQL back up on the current host once the backup has finished."""
    with settings(warn_only=True):
        execute(start_mysql, hosts=['%s' % env.host])