def setup_munin_node(allow=None):
    '''
    Install and configure a munin node. Does not include configuration of
    the munin server!

    :param allow: IP or CIDR string (or list of them) allowed to query
        this node; a bare IP gets a '/32' suffix appended.
    '''
    '''
    TODO:
    * Make sure "host=*" is set
    * Insert "cidr_allow" at right place
    * Have a blacklist of unneeded munin plugins, and disable them.
    * Co-configure munin-server
    * Make sure munin-node is in runlevel
    '''
    need_sudo = am_not_root()
    # Default to None instead of a mutable [] default, which would be
    # shared across calls.
    if allow is None:
        allow = []
    allow = _listify(allow)
    pkg_install('munin-node')

    conf = '/etc/munin/munin-node.conf'
    # Back up the pristine config once (the original duplicated this
    # assignment and backup call).
    backup_orig(conf, use_sudo=need_sudo)

    for client_ip in allow:
        if not '/' in client_ip:
            client_ip += '/32'
        append(conf, 'cidr_allow %s' % client_ip, use_sudo=need_sudo)
        # Open the munin-node port for each allowed client.
        configure_ufw(rules=['allow proto tcp from %s to any port 4949' % client_ip])

    _run('service munin-node restart', use_sudo=need_sudo)
def _configure_postgresql(env, delete_main_dbcluster=False):
    """
    This method is intended for cleaning up the installation when
    PostgreSQL is installed from a package. Basically, when PostgreSQL
    is installed from a package, it creates a default database cluster
    and splits the config file away from the data.
    This method can delete the default database cluster that was automatically
    created when the package is installed. Deleting the main database cluster
    also has the effect of stopping the auto-start of the postmaster server at
    machine boot. The method adds all of the PostgreSQL commands to the PATH.
    """
    pg_ver = sudo("dpkg -s postgresql | grep Version | cut -f2 -d':'")
    # Only the first 3 chars of the version matter (that's all that's
    # used for the dir name, e.g. '9.1').
    pg_ver = pg_ver.strip()[:3]
    # Loop until we have a parseable version; fall back to asking the
    # operator when the dpkg output can't be converted.
    while True:
        try:
            pg_ver = float(pg_ver)
            break
        except Exception:
            print(red("Problems trying to figure out PostgreSQL version."))
            pg_ver = raw_input(red("Enter the correct one (eg, 9.1; not 9.1.3): "))
    if delete_main_dbcluster:
        env.safe_sudo('pg_dropcluster --stop %s main' % pg_ver, user='******')
    # Make the PostgreSQL binaries available on everyone's PATH.
    exp = "export PATH=/usr/lib/postgresql/%s/bin:$PATH" % pg_ver
    if not contains('/etc/bash.bashrc', exp):
        append('/etc/bash.bashrc', exp, use_sudo=True)
def install(self):
    """Run the full provisioning sequence, then source bash completion
    in the deploy user's .bashrc."""
    self.set_locale()
    self.setup_firewall()
    self.update_packages()
    self.install_packages()
    self.install_servers()
    # NOTE(review): the argument order here is (text, filename), which is
    # the pre-1.0 Fabric append() signature -- confirm the Fabric version
    # in use before swapping the arguments.
    append('. /etc/bash_completion', '/home/%s/.bashrc' % state.env.user)
def forward_port():
    """Install nginx and reverse-proxy port 80 to the Jenkins app server
    listening on 127.0.0.1:8080."""
    sudo("apt-get -qq install nginx")
    # Drop the stock site so our config is the only one served.
    sudo("rm /etc/nginx/sites-available/default")
    conf = """\
upstream app_server {
    server 127.0.0.1:8080 fail_timeout=0;
}
server {
    listen 80;
    listen [::]:80 default ipv6only=on;
    server_name ci.yourcompany.com;
    location / {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $http_host;
        proxy_redirect off;
        if (!-f $request_filename) {
            proxy_pass http://app_server;
            break;
        }
    }
}
"""
    append("/etc/nginx/sites-available/jenkins", conf, use_sudo=True)
    sudo("ln -s /etc/nginx/sites-available/jenkins /etc/nginx/sites-enabled")
    sudo("service nginx restart")
def add_deploy_user():
    """Create the deploy user account, one time task.

    The deploy user is used for almost all processes. Your SSH key is
    pushed so that you can login via ssh keys.
    """
    username = '******'
    with settings(user='******'):
        # Create the user with no password, add it to sudoers.
        fastprint('adding the %s user account...' % username)
        run('useradd -m -s /bin/bash %s' % username)
        run('adduser %s sudo' % username)
        # Allow this user to sudo without password
        # really should list specific command (the last ALL)
        files.append('/etc/sudoers.d/%s' % username,
                     '%s ALL=(ALL:ALL) NOPASSWD: ALL' % username)
        fastprint('setting up SSH')
        ssh_path = '/home/%s/.ssh' % username
        if not files.exists(ssh_path, verbose=True):
            run('mkdir %s' % ssh_path)
            run('chmod 700 %s' % ssh_path)
            # Push our public key so future logins are key-based.
            key_text = _read_key_file('~/.ssh/id_rsa.pub')
            files.append('%s/authorized_keys' % ssh_path, key_text)
            run('chown -R %s:%s %s' % (username, username, ssh_path))
def user_sshkey():
    """
    Upload an SSH key to the remote system for the current user.

    :Example:

    fab --config=config/local.conf local system.user_sshkey
    """
    require('PUBLIC_SSH_KEY')
    with open(env.PUBLIC_SSH_KEY) as reader:
        key = reader.read()
    remote_directory = '/home/{}/.ssh'.format(env.user)
    remote_authkeys = '{}/authorized_keys'.format(remote_directory)
    # Remember whether we created ~/.ssh so we only chmod it once, after
    # the key has been written.
    new_directory = not files.exists(remote_directory)
    if new_directory:
        # Create the ".ssh" directory.
        run('mkdir -p {}'.format(remote_directory))
    # Add the key to "authorized keys".
    files.append(remote_authkeys, key)
    if new_directory:
        # Set directory permission to "700".
        run('chmod 700 {}'.format(remote_directory))
    # Set file permission to "600".
    run('chmod 600 {}'.format(remote_authkeys))
def _configure_nfs(env):
    """Set up the NFS export points for a CloudMan node and symlink the
    legacy /opt/galaxy location."""
    nfs_dir = "/export/data"
    cloudman_dir = "/mnt/galaxyData/export"
    if not exists(nfs_dir):
        # For the case of rerunning this script, ensure the nfs_dir does
        # not exist (exists() method does not recognize it as a file because
        # by default it points to a non-existing dir/file).
        with settings(warn_only=True):
            sudo('rm -rf {0}'.format(nfs_dir))
        sudo("mkdir -p %s" % os.path.dirname(nfs_dir))
        sudo("ln -s %s %s" % (cloudman_dir, nfs_dir))
    sudo("chown -R %s %s" % (env.user, os.path.dirname(nfs_dir)))
    # Setup /etc/exports paths, to be used as NFS mount points
    export_lines = [
        '/opt/sge           *(rw,sync,no_root_squash,no_subtree_check)',
        '/mnt/galaxyData    *(rw,sync,no_root_squash,subtree_check,no_wdelay)',
        '/mnt/galaxyIndices *(rw,sync,no_root_squash,no_subtree_check)',
        '/mnt/galaxyTools   *(rw,sync,no_root_squash,no_subtree_check)',
        '%s       *(rw,sync,no_root_squash,no_subtree_check)' % nfs_dir,
        '%s/openmpi         *(rw,sync,no_root_squash,no_subtree_check)' % env.install_dir,
    ]
    append('/etc/exports', export_lines, use_sudo=True)
    # Create a symlink for backward compatibility where all of CloudMan's
    # stuff is expected to be in /opt/galaxy
    old_dir = '/opt/galaxy'
    # Because stow is used, the equivalent to CloudMan's expected path
    # is actually the parent of the install_dir so use it for the symlink
    new_dir = os.path.dirname(env.install_dir)
    if not exists(old_dir) and exists(new_dir):
        sudo('ln -s {0} {1}'.format(new_dir, old_dir))
    env.logger.debug("Done configuring CloudMan NFS")
def init():
    """One-time host bootstrap: base packages, SSH user, log dirs,
    lighttpd config and the runit service skeleton."""
    with settings(user='******'):
        sudo('apt-get update')
        sudo('apt-get install -y mc lighttpd mysql-client git-core python-setuptools python-dev runit rrdtool sendmail memcached libjpeg62-dev')
        sudo('apt-get build-dep -y python-mysqldb')
        # Create the SSH user with a key login and passwordless `sv` sudo.
        if not exists('/home/%s' % SSH_USER):
            sudo('yes | adduser --disabled-password %s' % SSH_USER)
            sudo('mkdir /home/%s/.ssh' % SSH_USER)
            sudo('echo "%s" >> /home/%s/.ssh/authorized_keys' % (env.www_ssh_key, SSH_USER))
            append('/etc/sudoers', '%s ALL=(ALL) NOPASSWD:/usr/bin/sv' % SSH_USER, use_sudo=True)
        # World-writable log directory for the project.
        if not exists('/var/log/projects/uralsocionics'):
            sudo('mkdir -p /var/log/projects/uralsocionics')
            sudo('chmod 777 /var/log/projects/uralsocionics')
        # lighttpd module list + per-project vhost config, both symlinked
        # into conf-enabled.
        if not exists('/etc/lighttpd/conf-available/10-modules.conf'):
            put('tools/lighttpd/10-modules.conf', '/etc/lighttpd/conf-available/10-modules.conf', use_sudo=True)
            sudo('ln -s /etc/lighttpd/conf-available/10-modules.conf /etc/lighttpd/conf-enabled/10-modules.conf', shell=False)
        if not exists('/etc/lighttpd/conf-available/90-uralsocionics.conf'):
            sudo('touch /etc/lighttpd/conf-available/90-uralsocionics.conf')
        if not exists('/etc/lighttpd/conf-enabled/90-uralsocionics.conf'):
            sudo('ln -s /etc/lighttpd/conf-available/90-uralsocionics.conf /etc/lighttpd/conf-enabled/90-uralsocionics.conf', shell=False)
        # runit service directory registered under /etc/service.
        if not exists('/etc/sv/uralsocionics'):
            sudo('mkdir -p /etc/sv/uralsocionics/supervise')
            sudo('touch /etc/sv/uralsocionics/run')
            sudo('chmod 755 /etc/sv/uralsocionics/run')
            sudo('ln -s /etc/sv/uralsocionics /etc/service/uralsocionics', shell=False)
        sudo('mkdir -p /home/%s/projects/uralsocionics' % SSH_USER)
        sudo('chown -R %(user)s:%(user)s /home/%(user)s' % {'user': SSH_USER})
def rsync_mountpoints(src_inst, src_vol, src_mnt, dst_inst, dst_vol,
                      dst_mnt, encr=False):
    """Run `rsync` against mountpoints, copy disk label.

    :param src_inst: source instance;
    :param src_vol: source volume with label that will be copied to
                    dst_vol;
    :param src_mnt: root or directory hierarchy to replicate;
    :param dst_inst: destination instance;
    :param dst_vol: destination volume, that will be marked with label
                    from src_vol;
    :param dst_mnt: destination point where source hierarchy to place;
    :param encr: True if volume is encrypted;
    :type encr: bool."""
    src_key_filename = config.get(src_inst.region.name, 'KEY_FILENAME')
    dst_key_filename = config.get(dst_inst.region.name, 'KEY_FILENAME')
    # A temporary SSH key pair authorizes the source host to reach the
    # destination host directly (rsync or nc stream).
    with config_temp_ssh(dst_inst.connection) as key_file:
        with settings(host_string=dst_inst.public_dns_name,
                      key_filename=dst_key_filename):
            # Back up authorized_keys before appending the temporary key;
            # restored at the end of this function.
            wait_for_sudo('cp /root/.ssh/authorized_keys '
                          '/root/.ssh/authorized_keys.bak')
            pub_key = local('ssh-keygen -y -f {0}'.format(key_file), True)
            append('/root/.ssh/authorized_keys', pub_key, use_sudo=True)
            if encr:
                # Encrypted volumes are streamed block-wise: a detached
                # listener on the destination writes the incoming stream
                # straight onto the device.
                sudo('screen -d -m sh -c "nc -l 60000 | gzip -dfc | '
                     'sudo dd of={0} bs=16M"'
                     .format(get_vol_dev(dst_vol)), pty=False)
                # dirty magick
                dst_ip = sudo(
                    'curl http://169.254.169.254/latest/meta-data/public-ipv4')
        with settings(host_string=src_inst.public_dns_name,
                      key_filename=src_key_filename):
            # Upload the temp private key so the source host can SSH into
            # the destination; from here on dst_key_filename is the
            # remote-relative key name.
            put(key_file, '.ssh/', mirror_local_mode=True)
            dst_key_filename = os.path.split(key_file)[1]
            if encr:
                # Raw block copy to the listener started above.
                sudo('(dd if={0} bs=16M | gzip -cf --fast | nc -v {1} 60000)'
                     .format(get_vol_dev(src_vol), dst_ip))
            else:
                # File-level replication, excluding host-specific state
                # (SSH host keys, udev net rules, EC2 metadata, tmp dirs).
                cmd = (
                    'rsync -e "ssh -i .ssh/{key_file} -o '
                    'StrictHostKeyChecking=no" -cahHAX --delete --inplace '
                    '--exclude /root/.bash_history '
                    '--exclude /home/*/.bash_history '
                    '--exclude /etc/ssh/moduli --exclude /etc/ssh/ssh_host_* '
                    '--exclude /etc/udev/rules.d/*persistent-net.rules '
                    '--exclude /var/lib/ec2/* --exclude=/mnt/* '
                    '--exclude=/proc/* --exclude=/tmp/* '
                    '{src_mnt}/ root@{rhost}:{dst_mnt}')
                wait_for_sudo(cmd.format(
                    rhost=dst_inst.public_dns_name, dst_mnt=dst_mnt,
                    key_file=dst_key_filename, src_mnt=src_mnt))
            # Read the filesystem label from the source device so it can
            # be copied to the destination below.
            label = sudo('e2label {0}'.format(get_vol_dev(src_vol)))
        with settings(host_string=dst_inst.public_dns_name,
                      key_filename=dst_key_filename):
            if not encr:
                sudo('e2label {0} {1}'.format(get_vol_dev(dst_vol), label))
            # Restore the original authorized_keys, revoking the temp key.
            wait_for_sudo('mv /root/.ssh/authorized_keys.bak '
                          '/root/.ssh/authorized_keys')
            run('sync', shell=False)
            # Keep flushing in the background to make sure data hits disk.
            run('for i in {1..20}; do sync; sleep 1; done &')
def _set_profile(self):
    """Extend the parent's profile setup by exporting the project's
    settings-module env var in /etc/profile."""
    super(AppSetup, self)._set_profile()
    if self.settings_host and env.project_env_var:
        values = {'env_name': env.project_env_var,
                  'value': self.settings_host}
        export_line = '%(env_name)s="%(value)s"; export %(env_name)s' % values
        append('/etc/profile', export_line, use_sudo=True)
def append_saltmaster_config():
    """Append local file_client plus staging file/pillar roots to both
    the salt minion and master configs."""
    config_lines = [
        'file_client: local',
        'file_roots:\n  stage:\n    - /srv/lbaas-staging-salt\n',
        'pillar_roots:\n  stage:\n    - /srv/lbaas-staging-pillar\n',
    ]
    for config_path in ('/etc/salt/minion', '/etc/salt/master'):
        append(config_path, config_lines, use_sudo=True)
def _put_minion_debug_mode(minion_cnf_file='/etc/salt/minion',
                           log_level='log_level: trace',
                           use_sudo=True):
    '''
    Helper function to put minion in debug mode
    '''
    # Editing /etc/salt requires root; refuse to run otherwise.
    if not use_sudo:
        raise _NeedSudoError("You need to run this as sudo!")
    files.append(minion_cnf_file, log_level, use_sudo=use_sudo)
def _add_minion_id(minion_id, minion_id_file='/etc/salt/minion_id',
                   use_sudo=True):
    '''
    Helper function to add minion_id to /etc/salt/minion_id
    '''
    # Writing under /etc/salt requires root; refuse to run otherwise.
    if not use_sudo:
        raise _NeedSudoError("You need to run this as sudo!")
    files.append(minion_id_file, minion_id, use_sudo=use_sudo)
def base_install():
    """
    Perform the basic install
    """
    # Update the AMI completely
    sudo('yum --assumeyes --quiet update')
    # Install puppet and git
    sudo('yum --assumeyes --quiet install puppet git')
    # Clone our code
    run('git clone git://github.com/ICRAR/boinc-magphys.git')
    # Puppet and git should be installed by the python
    with cd('/home/ec2-user/boinc-magphys/machine-setup'):
        sudo('puppet boinc-magphys.pp')
    # Recommended version per http://boinc.berkeley.edu/download_all.php on 2012-07-10
    run('svn co http://boinc.berkeley.edu/svn/trunk/boinc /home/ec2-user/boinc')
    # Setup the pythonpath
    profile_lines = [
        '',
        'PYTHONPATH=/home/ec2-user/boinc/py:/home/ec2-user/boinc-magphys/server/src',
        'export PYTHONPATH',
    ]
    append('/home/ec2-user/.bash_profile', profile_lines)
    # Bootstrap setuptools/pip for Python 2.7, then the libraries we need.
    run('wget http://pypi.python.org/packages/2.7/s/setuptools/setuptools-0.6c11-py2.7.egg')
    sudo('sh setuptools-0.6c11-py2.7.egg')
    run('rm setuptools-0.6c11-py2.7.egg')
    sudo('rm -f /usr/bin/easy_install')
    sudo('easy_install-2.7 pip')
    sudo('rm -f /usr/bin/pip')
    sudo('pip-2.7 install fabric')
    sudo('pip-2.7 install configobj')
def configure_replication():
    """Configure PostgreSQL streaming replication between the master and
    slave hosts defined in env."""
    # determine the PGDATA directory
    pgdata = sudo("pg_lsclusters | grep postgres | awk '{print $6}'")
    pgversion = sudo("pg_lsclusters | grep postgres | awk '{print $1}'")
    pgconfDir = "/etc/postgresql/%s/main" % pgversion
    # The "other" host is the one allowed to connect for replication.
    if env.host == env.db_master:
        srcIp, destIp = env.db_master, env.db_slave
    else:
        srcIp, destIp = env.db_slave, env.db_master
    if env.host == env.db_master:
        # Replication role is only created on the master.
        sudo("psql -c \"CREATE USER replicator SUPERUSER LOGIN CONNECTION LIMIT 1 ENCRYPTED PASSWORD '%s';\"" % env.db_replicatorpwd, user="******")
    # shut down the server and then edit configs
    sudo("service postgresql stop", pty=False)
    # update config
    # TODO -- make this replace inline...too lazy for now
    pgconfCfg = (
        "hot_standby = on\n"
        "max_wal_senders = 1\n"
        "wal_level = 'hot_standby'\n"
        "archive_mode = on\n"
        "archive_command = 'cd .'\n"
        "listen_addresses = 'localhost,%s'" % env.host
    )
    sudo("echo \"%s\" >> %s/postgresql.conf" % (pgconfCfg, pgconfDir))
    files.append("%s/pg_hba.conf" % pgconfDir,
                 "host    replication     replicator      %s/32           md5" % destIp,
                 use_sudo=True)
def locales(names):
    """
    Require the list of locales to be available.
    """
    config_file = '/var/lib/locales/supported.d/local'
    if not is_file(config_file):
        # Fall back to the classic location when the Ubuntu-style
        # supported.d directory is absent.
        config_file = '/etc/locale.gen'

    # Regenerate locales if config file changes
    with watch(config_file, use_sudo=True) as config:
        # Add valid locale names to the config file
        supported = dict(supported_locales())
        for name in names:
            if name not in supported:
                warn('Unsupported locale name "%s"' % name)
                continue
            charset = supported[name]
            locale = "%s %s" % (name, charset)
            uncomment(config_file, escape(locale), use_sudo=True, shell=True)
            append(config_file, locale, use_sudo=True, partial=True, shell=True)

    if config.changed:
        if distrib_id() == "Archlinux":
            run_as_root('locale-gen')
        else:
            run_as_root('dpkg-reconfigure --frontend=noninteractive locales')
def create_app_user():
    """Create the 'hadoop' account with a passwordless SSH loopback setup
    and a .bashrc exporting HADOOP_HOME."""
    #sudo("sudo locale-gen UTF-8")
    user_exists = run("id -u hadoop", warn_only=True)
    if user_exists.return_code == 1:
        sudo("useradd hadoop --password hadoop -d /home/hadoop -s /bin/bash")
    if not exists("/home/hadoop/.ssh"):
        sudo("mkdir -p /home/hadoop/.ssh")
    sudo("chown -R hadoop /home/hadoop")
    bash_login_content = """
    if [ -f ~/.bashrc ]; then
        . ~/.bashrc
    fi
    """
    _replace_file_content("/home/hadoop/.bash_login", bash_login_content)
    with settings(sudo_user='******'):
        # Key-based ssh to localhost so hadoop daemons can start each other.
        if not exists('/home/hadoop/.ssh/id_rsa'):
            sudo('ssh-keygen -t rsa -P "" -f /home/hadoop/.ssh/id_rsa')
            sudo("cat /home/hadoop/.ssh/id_rsa.pub >> /home/hadoop/.ssh/authorized_keys")
            sudo("chmod 0600 /home/hadoop/.ssh/authorized_keys")
            sudo("ssh-keyscan -H localhost >> /home/hadoop/.ssh/known_hosts")
            sudo("ssh-keyscan -H 0.0.0.0 >> /home/hadoop/.ssh/known_hosts")
        if not exists("/home/hadoop/.bashrc"):
            sudo("touch /home/hadoop/.bashrc")
        if not contains("/home/hadoop/.bashrc", "export HADOOP_HOME=/usr/local/lib/hadoop"):
            append("/home/hadoop/.bashrc", APP_USER_SETTINGS, use_sudo=True)
def initialise_qgis_plugin_repo():
    """Initialise a QGIS plugin repo where we host test builds."""
    _all()
    fabtools.require.deb.package('libapache2-mod-wsgi')
    code_path = os.path.join(env.repo_path, env.repo_alias)
    local_path = '%s/scripts/test-build-repo' % code_path

    # Create and own the repo dir, then copy over the plugin assets.
    if not exists(env.plugin_repo_path):
        sudo('mkdir -p %s' % env.plugin_repo_path)
        sudo('chown %s.%s %s' % (env.user, env.user, env.plugin_repo_path))
    run('cp %s/plugin* %s' % (local_path, env.plugin_repo_path))
    run('cp %s/icon* %s' % (code_path, env.plugin_repo_path))

    # Render the apache conf from its template and point it at our site.
    run('cp %(local_path)s/inasafe-test.conf.templ '
        '%(local_path)s/inasafe-test.conf' % {'local_path': local_path})
    sed('%s/inasafe-test.conf' % local_path,
        'inasafe-test.linfiniti.com',
        env.repo_site_name)

    with cd('/etc/apache2/sites-available/'):
        # Replace any stale symlinked conf with a fresh one.
        if exists('inasafe-test.conf'):
            sudo('a2dissite inasafe-test.conf')
            fastprint('Removing old apache2 conf', False)
            sudo('rm inasafe-test.conf')
        sudo('ln -s %s/inasafe-test.conf .' % local_path)

    # Add a hosts entry for local testing - only really useful for localhost
    hosts = '/etc/hosts'
    if not contains(hosts, 'inasafe-test'):
        append(hosts, '127.0.0.1 %s' % env.repo_site_name, use_sudo=True)

    sudo('a2ensite inasafe-test.conf')
    sudo('service apache2 reload')
def _setup_suite():
    """Provision nginx/postgres/python on the instance, back up the stock
    nginx site, install our conf and append the proxy server block."""
    sudo('apt-get update')
    # Clear stale supervisor sockets from a previous run.
    if files.exists('/tmp/supervisor.sock'):
        sudo('unlink /tmp/supervisor.sock')
    if files.exists('/var/run/supervisor.sock'):
        sudo('unlink /var/run/supervisor.sock')
    sudo('apt-get install -y '
         'nginx git python-pip postgresql '
         'postgresql-contrib libpq-dev python-dev')
    # BUG FIX: the exists() check used the relative path
    # 'etc/nginx/...' (no leading slash), so the backup was re-made on
    # every run; check the absolute path actually written below.
    if not files.exists(
        '/etc/nginx/sites-available/original-default', use_sudo=True
    ):
        sudo(
            'cp /etc/nginx/sites-available/default '
            '/etc/nginx/sites-available/original-default'
        )
    put(local_path="~/projects/t-buddies/simple_nginx_conf",
        remote_path="/etc/nginx/sites-available/default", use_sudo=True)
    # Proxy port 80 for this instance's public DNS name to the app on 8080.
    append('/etc/nginx/sites-available/default',
           "server {listen 80;server_name "
           + env.active_instance.public_dns_name + "/;"
           "access_log /var/log/nginx/test.log;location /"
           "{proxy_pass http://127.0.0.1:8080;proxy_set_header Host $host;"
           "proxy_set_header X-Real-IP $remote_addr;"
           "proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;"
           "}}")
    if not files.exists("~/.previous/"):
        run('mkdir ~/.previous')
    sudo('service nginx start')
def setup_kiosk():
    """
    set up kiosk parts
    based on https://www.danpurdy.co.uk/web-development/raspberry-pi-kiosk-screen-tutorial/
    or http://www.raspberry-projects.com/pi/pi-operating-systems/raspbian/gui/auto-run-browser-on-startup
    """
    autostart = "/etc/xdg/lxsession/LXDE-pi/autostart"
    with hide("running", "stderr"):
        #@xscreensaver -no-splash
        comment(autostart, "@xscreensaver -no-splash", use_sudo=True)
        # Disable all screen blanking / power management.
        for xset_line in ("@xset s off", "@xset -dpms", "@xset s noblank"):
            append(autostart, xset_line, use_sudo=True, escape=True)
        # Suppress chromium's "restore session?" prompt after hard resets.
        append(autostart,
               """@sed -i 's/"exited_cleanly": false/"exited_cleanly": true/' ~/.config/chromium/Default/Preferences""",
               use_sudo=True, escape=True)
        #auto start
        if not contains(autostart,
                        "@chromium --noerrdialogs --kiosk http://www.page-to.display --incognito",
                        use_sudo=True, escape=True):
            append(autostart,
                   "@chromium --noerrdialogs --kiosk http://dashingdemo.herokuapp.com/sample --incognito",
                   use_sudo=True, escape=True)
def add_rackspace_monitoring_agent(username = None, apikey = None): if not nilsson_run('lsb_release --id --short') == 'Ubuntu': raise RuntimeError, "ERROR: Currently the only Linux distribution i know how to install the rackspace-monitoring-agent on is Ubuntu" need_sudo = am_not_root() release = nilsson_run('lsb_release --release --short') repo_url = 'http://stable.packages.cloudmonitoring.rackspace.com/ubuntu-%s-x86_64' % release repo_key = 'https://monitoring.api.rackspacecloud.com/pki/agent/linux.asc' need_sudo = am_not_root() append('/etc/apt/sources.list.d/rackspace-monitoring-agent.list', 'deb %s cloudmonitoring main' % repo_url, use_sudo=need_sudo) nilsson_run('curl %s | apt-key add -' % repo_key, use_sudo = need_sudo) pkg_install('rackspace-monitoring-agent', max_hours=0) if not username or not apikey: print "WARNING: username or API key missing, not configuring the rackspace-monitoring-agent. Proceed manually: " print " " print " sudo rackspace-monitoring-agent --setup" print " " return nilsson_run('rackspace-monitoring-agent --setup --username %s --apikey %s' % (username, apikey), use_sudo = need_sudo) nilsson_run('service rackspace-monitoring-agent start', use_sudo = need_sudo)
def set_hostname(hostname): # TODO: reload MTA if not hostname or hostname == 'None': hostname = env.host need_sudo = am_not_root() with settings(warn_only=True): hosts_entry = run('grep "^127\.0\.1\.1 " /etc/hosts') line_to_append = '127.0.1.1 %s' % hostname if hosts_entry == line_to_append: pass elif not hosts_entry: # TODO: this should be placed right under the line ^127.0.0.1 append('/etc/hosts', '127.0.1.1 %s' % hostname, use_sudo=need_sudo) else: sed('/etc/hosts', '^(127\.0\.1\.1) (.*)$', '\\1 %s \\2' % hostname, use_sudo=need_sudo) if distro_flavour() == 'redhat': sed('/etc/sysconfig/network', '^HOSTNAME=.*', 'HOSTNAME=%s' % hostname, use_sudo=need_sudo) _run('hostname %s' % hostname, use_sudo=need_sudo) else: _run('echo %s > /etc/hostname' % hostname, use_sudo=need_sudo) _run('hostname -F /etc/hostname', use_sudo=need_sudo) if exists('/etc/mailname'): _run('echo %s > /etc/mailname' % hostname, use_sudo=need_sudo) # Restart logging service servicename = 'rsyslog' if exists('/etc/init.d/%s' % servicename): _run('service %s restart' % servicename, use_sudo=need_sudo) else: # CentOS<6 and old Ubuntus and some Debians might not use 'rsyslog' print 'WARN: Could not identify syslogging service. Please restart manually.'
def server_customizations():
    '''Customize the server (user, authorized_keys, ...).'''
    username = env.user
    env.user = '******'

    # create user
    all_users = run('cut -d: -f1 /etc/passwd').split()
    if username in all_users:
        print(magenta(flo(' nothing to do, user {username} already exists')))
        env.user = username
        return

    host = env.host
    run(flo('adduser {username}'))

    # add user to the sudo group, cf. http://askubuntu.com/a/7484
    #run('sudo adduser {username} sudo'.format(**locals()))
    # http://jeromejaglale.com/doc/unix/ubuntu_sudo_without_password
    append('/etc/sudoers',
           flo('{username}    ALL=(ALL) NOPASSWD: ALL'),
           use_sudo=True)

    # set up password-less login
    local(flo('ssh-copy-id -i ~/.ssh/id_rsa.pub  {username}@{host}'))

    env.user = username

    # Disable service apache2 httpd, cf. http://askubuntu.com/a/355102
    sudo('update-rc.d apache2 disable')
def setup_munin_plugin_rabbitmq(vhost=None):
    '''
    Install and configure the RabbitMQ plugin for Munin from
    https://github.com/ask/rabbitmq-munin
    '''
    need_sudo = am_not_root()
    conf = '/etc/munin/plugin-conf.d/munin-node'

    # install rabbitmq-munin into /opt/
    pkg_install('git')
    _run('git clone https://github.com/ask/rabbitmq-munin.git /opt/rabbitmq-munin',
         use_sudo=need_sudo)

    # Configure munin-node to use rabbitmq-munin
    config_string = """
[rabbitmq_*]
user root
"""
    if vhost:
        config_string += '\nenv.vhost %s' % vhost
    backup_orig(conf, use_sudo=need_sudo)
    append(conf, config_string, use_sudo=need_sudo)

    # Make rabbitmq-munin available in /etc/munin/plugins/
    _run('ln -sf /opt/rabbitmq-munin/rabbitmq_* /etc/munin/plugins',
         use_sudo=need_sudo)
    _run('service munin-node restart', use_sudo=need_sudo)
def user_config(username, port):
    """Per-user IPython notebook setup: PATH, log dir, profile and a
    password-protected notebook config bound to `port`."""
    with settings(sudo_user=username, warn_only=True):
        # append anaconda path to .bashrc
        append('/home/%s/.bashrc' % username,
               'export PATH=/usr/local/anaconda/bin:$PATH',
               use_sudo=True)
        # create log dir
        sudo('mkdir /home/%s/logs' % username)
        sudo('touch /home/%s/logs/ipython_supervisor.log' % username)
        # create ipython notebook profile
        sudo('ipython profile create %s' % username, user=username)
        # sudo('chmod -r {u} /home/{u}/.ipython'.format(u=username),user=root)
        # get hashed password
        hashed = passwd()
        # write ipynb config file
        ipynb_config_file = IPYNB_CONF.format(u=username)
        backup_config(ipynb_config_file)
        append(filename=ipynb_config_file,
               text=IPYNB_CONF_TEMPLATE.format(u=username,
                                               p=port,
                                               hpw=hashed,
                                               ),
               use_sudo=True,
               )
def add_head_to_worker(head_ip):
    """Register the cluster head node on a Torque worker: add a hosts
    entry for it and write its hostname into /etc/torque/server_name.

    :param head_ip: IP address of the head node."""
    head_hostname = get_hostname_from_ip(head_ip)
    # BUG FIX: fabric.contrib.files.contains/append take (filename, text);
    # the original passed the arguments in reverse order, so the hosts
    # check and both appends operated on the wrong "file".
    if not files.contains('/etc/hosts', head_hostname):
        files.append('/etc/hosts', head_ip + ' ' + head_hostname,
                     use_sudo=True)
    sudo('rm -f /etc/torque/server_name')
    files.append('/etc/torque/server_name', head_hostname, use_sudo=True)
def push_key(key_file='~/.ssh/id_rsa.pub'):
    """Append the given public key to the app user's authorized_keys,
    acting through the sudo-capable account.

    :param key_file: local path of the public key to push."""
    key_text = read_key_file(key_file)
    with settings(user=env.sudouser):
        run('id')
        sudo("mkdir -p %(userhome)s/.ssh" % env, user=env.appuser)
        append('%(userhome)s/.ssh/authorized_keys' % env,
               key_text, use_sudo=True)
def init_user():
    """Creates a user named wonderhop for access. Copies pubkey."""
    with settings(user="******"):
        # Only create the account if its home dir isn't there yet.
        if not exists("/home/wonderhop"):
            run("useradd -m -s /bin/bash -U wonderhop")
            # Passwordless sudo for the new account.
            append("/etc/sudoers", "wonderhop ALL = NOPASSWD: ALL")
            copy_sshkey()
def clone_repo(): """Clone the wonderhop repo on the server""" # Add known hosts for Github append("~/.ssh/known_hosts", [ "|1|AxYrTZcwBIPIFSdy29CGanv85ZE=|D0Xa0QCz1anXJ9JrH4eJI3EORH8= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==", "|1|ErT4pRs4faesbyNw+WB0hWuIycs=|9+4iN3FDijMOl1Z+2PNB9O9wXjw= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==", ]) if not exists("~/.ssh/id_github_deploy"): # Generate a public/private key pair run("ssh-keygen -q -t rsa -f ~/.ssh/id_github_deploy -N ''") ssh_pub_key = StringIO() get("~/.ssh/id_github_deploy.pub", ssh_pub_key) ssh_pub_key = ssh_pub_key.getvalue().strip() # Add it to Github gh_user = prompt("Github username?") gh_pass = getpass("Github password? ") urllib2.urlopen(urllib2.Request("https://api.github.com/repos/wonderhop/wonderhop/keys", json.dumps({ "title": "wonderhop@{0}".format(env.host), "key": ssh_pub_key, }), { "Content-Type": "application/json", "Authorization": "Basic {0}".format(base64.b64encode("{0}:{1}".format(gh_user, gh_pass))), })) # Specify that we should use the given key for Github append("~/.ssh/config", "Host github.com\nIdentityFile ~/.ssh/id_github_deploy") run("git clone [email protected]:wonderhop/wonderhop.git")
def add_to_path(path):
    """Add a new directory to PATH for the default shell (and the login
    shell) by appending an export line to each shell config file.

    :param path: directory to append to PATH."""
    from fabric.contrib.files import append
    import vars
    # Renamed locals: the original bound `vars = vars.Vars()` and looped
    # over `file`, shadowing both builtins.
    host_vars = vars.Vars()
    for config_file in [host_vars.os.default_shell_config,
                        host_vars.os.default_loginshell_config]:
        append(config_file, "export PATH=$PATH:" + path, use_sudo=True)
def mountVolume(self): env.use_ssh_config = True try: with settings(hide('warnings', 'stderr'), warn_only=True, host_string=self.ip): if not DRY: sudo('vgchange -ay data') time.sleep(10) uuid, fstype = sudo( 'blkid |grep "mapper/data" |cut -f2,3 -d" "').split() fstype = fstype.split('"')[1] files.append('/etc/fstab', '{0}\t{1}\t{2}\tnoatime\t0\t0'.format( uuid, MOUNT_POINT, fstype), use_sudo=True) sudo('mount {0}'.format(MOUNT_POINT)) if DEBUG: print 'Mounted {0} on {1}'.format(MOUNT_POINT, self.name) except Exception as e: raise
def _locales_generic(names, config_file, command):
    """Ensure `names` are enabled in `config_file`, then run `command`
    to regenerate locales when the file actually changed."""
    supported = supported_locales()
    _check_for_unsupported_locales(names, supported)

    # Regenerate locales if config file changes
    with watch(config_file, use_sudo=True) as config:
        # Add valid locale names to the config file
        charset_from_name = dict(supported)
        for name in names:
            locale = "%s %s" % (name, charset_from_name[name])
            # Re-enable a commented entry if present, otherwise append it.
            uncomment(config_file, escape(locale), use_sudo=True, shell=True)
            append(config_file, locale, use_sudo=True, partial=True, shell=True)

    if config.changed:
        run_as_root(command)
def _setup_nginx_file():
    """Create or update the project's nginx site file: add the checked
    domain / port 80 to an existing config, or render a fresh one from
    NGINEX_FILE_CONTENT, then restart nginx."""
    file_path = '/etc/nginx/sites-enabled/{}'.format(project_name)
    _debug('Checking nginx file {}...'.format(file_path))
    checked_domain = _check_domain()
    if exists(file_path):
        local_file_path = '/tmp/nginx.tmp'
        get(file_path, local_file_path)
        # BUG FIX: the original leaked both file handles and uploaded the
        # still-open write handle (positioned at EOF), so put() could send
        # empty/partial content. Use context managers and re-open for read.
        with open(local_file_path, 'r') as reader:
            file_content = reader.read()
        if checked_domain and checked_domain not in file_content:
            content = []
            for line in file_content.split('\n'):
                if 'server_name ' in line:
                    line = line.replace(
                        'server_name',
                        'server_name {}'.format(checked_domain))
                elif 'listen ' in line:
                    line = '    listen 80;'
                content.append(line)
            with open('/tmp/nginx.tmp', 'w') as writer:
                writer.write('\n'.join(content))
            with open('/tmp/nginx.tmp', 'r') as reader:
                put(reader, file_path)
            _debug('Restarting nginx...')
            run('/etc/init.d/nginx restart')
    else:
        _debug('Creating nginx file {}...'.format(file_path))
        local_port = _available_port()
        if checked_domain:
            port = 80
            server_name = checked_domain
        else:
            # No domain yet: expose on a derived high port instead of 80.
            port = local_port + 1000
            server_name = env.hosts[0]
        text = NGINEX_FILE_CONTENT.format(project_name=project_name,
                                          server_name=server_name,
                                          port=port,
                                          local_port=local_port)
        append(file_path, text)
        _debug('Nginx configured with {}:{}'.format(server_name, port))
        _debug('Restarting nginx...')
        run('/etc/init.d/nginx restart')
def memcached_configure(self, **kwargs):
    """Apply memcached configuration options to the config file.

    For every option in ``self.memcached_conf_patterns``, the value is
    taken from ``kwargs`` if given, otherwise from the corresponding
    ``memcached_<key>`` attribute on self. ``None`` means "leave
    untouched"; an empty string means "comment out only". Finally the
    memcached service is restarted."""
    for key, (opt, pat) in self.memcached_conf_patterns.iteritems():
        if key in kwargs:
            val = kwargs[key]
        else:
            # fall back to the instance-level default, if any
            val = getattr(self, 'memcached_{0}'.format(key), None)
        if val is None:
            continue
        # pattern matching the option line, commented or not
        full_pat = '^#?\s*{0}\s+{1}'.format(opt, pat)
        full_val = '{0} {1}'.format(opt, val)
        # we don't know if the config file contains the line or not, so
        # comment it out if it does and then add a new line with the date
        files.comment(self.memcached_conf, full_pat, use_sudo=True)
        if val != '':
            date = datetime.datetime.now().strftime('%Y-%m-%d')
            message = '\n# {0}; added by fabulaws {1}\n{2}'\
                      ''.format(key, date, full_val)
            files.append(self.memcached_conf, message, use_sudo=True)
    if self.memcached_ulimit is not None:
        # raise the open-files limit in the init defaults file
        ulimit = 'ulimit -n {0}'.format(self.memcached_ulimit)
        files.append(self.memcached_init_default, ulimit, use_sudo=True)
    self.memcached_service('restart')
def locales(names):
    """
    Require the list of locales to be available.
    """
    config_file = '/var/lib/locales/supported.d/local'

    # Regenerate locales if config file changes
    with watch(config_file, use_sudo=True) as config:
        # Add valid locale names to the config file
        supported = dict(supported_locales())
        for name in names:
            if name not in supported:
                warn('Unsupported locale name "%s"' % name)
                continue
            entry = "%s %s" % (name, supported[name])
            append(config_file, entry, use_sudo=True)

    if config.changed:
        sudo('dpkg-reconfigure locales')
def _create_or_update_dotenv():
    """Ensure .env has the debug flag, the site name, and a secret key
    (generated once, never overwritten)."""
    append('.env', 'DJANGO_DEBUG_FALSE=y')
    append('.env', f'SITENAME={env.host}')
    if 'DJANGO_SECRET_KEY' not in run('cat .env'):
        # Draw 50 characters from letters, punctuation and digits.
        alphabet = string.ascii_letters + string.punctuation + string.digits
        secret = ''.join(random.SystemRandom().choices(alphabet, k=50))
        append('.env', f'DJANGO_SECRET_KEY={secret}')
def _update_settings(source_folder, site_name):
    """Turn off DEBUG, pin ALLOWED_HOSTS to the site, and wire in a
    generated-once secret key module."""
    settings_path = source_folder + '/superlists/superlists/settings.py'
    sed(settings_path, "DEBUG = True", "DEBUG = False")
    sed(settings_path,
        'ALLOWED_HOSTS =.+$',
        'ALLOWED_HOSTS = ["%s"]' % (site_name,)
    )
    secret_key_file = source_folder + '/superlists/secret_key.py'
    if not exists(secret_key_file):
        # Generate the key only once so restarts keep sessions valid.
        chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
        rng = random.SystemRandom()
        key = ''.join(rng.choice(chars) for _ in range(50))
        append(secret_key_file, "SECRET_KEY = '%s'" % (key,))
    # Import the key from its own module at the bottom of settings.py.
    for extra_line in (
        '\nimport sys',
        '\nsys.path.insert(0, \'/home/elspeth/sites/miniguez.com/source/superlists\')',
        '\nfrom secret_key import SECRET_KEY',
    ):
        append(settings_path, extra_line)
def _create_or_update_dotenv():
    """Ensure .env carries the debug flag, site name and a one-time
    secret key."""
    append('.env', 'DJANGO_DEBUG_FALSE=y')
    append('.env', f'SITENAME={env.host}')
    if 'DJANGO_SECRET_KEY' not in run('cat .env'):
        rng = random.SystemRandom()
        new_secret = ''.join(rng.choices(
            '1*b84qe7z%4v)(_ji01k^_xx2tkhs062zpsza9gpzl3rxdfhfv', k=50))
        append('.env', f'DJANGO_SECRET_KEY={new_secret}')
def _create_or_update_dotenv():
    """Idempotently write deploy settings into the remote .env file.

    Fixes:
    - The appended key was misspelled ``DJANGO_SECKRET_KEY`` while the
      guard checks for ``DJANGO_SECRET_KEY``, so a fresh (and unused)
      key line was appended on *every* deploy. Now the spelling matches.
    - The alphabet was missing the letter ``q``; restored the full
      lowercase+digits set.
    """
    append('.env', 'DJANGO_DEBUG_FALSE=y', use_sudo=True)
    append('.env', f'SITENAME={env.host}', use_sudo=True)
    current_contents = sudo('cat .env', user='******')
    if 'DJANGO_SECRET_KEY' not in current_contents:
        new_secret = ''.join(random.SystemRandom().choices(
            'abcdefghijklmnopqrstuvwxyz0123456789', k=50))
        append('.env', f'DJANGO_SECRET_KEY={new_secret}', use_sudo=True)
def _create_or_update_dotenv():
    """Ensure the remote .env carries the deploy flags and a secret key."""
    append('.env', 'DJANGO_DEBUG_FALSE=y')
    append('.env', f"SITENAME={env.host}")
    dotenv = run("cat .env")
    if "DJANGO_SECRET_KEY" in dotenv:
        return
    rng = random.SystemRandom()
    secret = "".join(rng.choices("vhbjkvbkjreb328y5839271598", k=50))
    append(".env", f"DJANGO_SECRET_KEY={secret}")
def _create_or_update_dotenv():
    """Write deploy settings to .env; safe to run on every deploy.

    ``append`` is a no-op for lines that already exist, and the secret
    key is only generated the first time.
    """
    for line in ("DJANGO_DEBUG_FALSE=y", f"SITENAME={env.host}"):
        append(".env", line)
    if "DJANGO_SECRET_KEY" not in run("cat .env"):
        alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
        rng = random.SystemRandom()
        secret = "".join(rng.choices(alphabet, k=50))
        append(".env", f"DJANGO_SECRET_KEY={secret}")
def _create_or_update_dotenv(domain):
    """Ensure .envrc on the server has the deploy flags for *domain*.

    The secret key is created once with a CSPRNG and then reused.
    """
    for line in ("DJANGO_DEPLOY=y", f"SITENAME={domain}"):
        append(".envrc", line)
    if "DJANGO_SECRET_KEY" not in run("cat .envrc"):
        alphabet = "abcdefghijklmnopqrstuvwxyz123456789"
        secret = "".join(random.SystemRandom().choices(alphabet, k=50))
        append(".envrc", f"DJANGO_SECRET_KEY={secret}")
def _create_or_update_dotenv():
    """Create or update the remote .env file; idempotent per deploy."""
    append('.env', 'DJANGO_DEBUG_FALSE=yes')
    append('.env', f'SITENAME={env.host}')
    existing = run('cat .env')
    if 'DJANGO_SECRET_KEY' in existing:
        return
    rng = random.SystemRandom()
    key = ''.join(rng.choices('abcdefghijklmnopqrstuvwxyz', k=50))
    append('.env', f'DJANGO_SECRET_KEY={key}')
def _create_or_update_dotenv():
    """Idempotently populate the remote .env file for this deploy."""
    for setting in ('DJANGO_DEBUG_FALSE=y', f'SITENAME={env.host}'):
        append('.env', setting)
    # Generate the secret only once; keep whatever is already stored.
    if 'DJANGO_SECRET_KEY' not in run('cat .env'):
        alphabet = 'qwertyuiop1234567890asdfghjklzxcvbnm'
        secret = ''.join(random.SystemRandom().choices(alphabet, k=50))
        append('.env', f'DJANGO_SECRET_KEY={secret}')
def _create_or_update_dotenv():
    """Idempotently write deploy settings into the remote .env file.

    Fix: the secret-key alphabet was mistyped as ``'adcdefg…'`` —
    missing ``b`` and repeating ``d`` — which skews and slightly
    weakens the generated key. Restored the intended
    lowercase+digits alphabet.
    """
    append('.env', 'DJANGO_DEBUG_FALSE=y')
    append('.env', f'SITENAME={env.host}')
    current_contents = run('cat .env')
    if 'DJANGO_SECRET_KEY' not in current_contents:
        new_secret = ''.join(random.SystemRandom().choices(
            'abcdefghijklmnopqrstuvwxyz0123456789', k=50))
        append('.env', f'DJANGO_SECRET_KEY={new_secret}')
def _create_or_update_dotenv():
    """Write the deploy flags and a one-time secret key to .env."""
    # `append` adds each line only if it is not already in the file.
    for line in ('DJANGO_DEBUG_FALSE=y', f'SITENAME={env.host}'):
        append('.env', line)
    if 'DJANGO_SECRET_KEY' not in run('cat .env'):
        rng = random.SystemRandom()
        alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789'
        secret_key = ''.join(rng.choices(alphabet, k=50))
        append('.env', f'DJANGO_SECRET_KEY={secret_key}')
def _setup_bash_aliases(home_folder):
    """Append virtualenvwrapper configuration to the user's .bashrc."""
    bashrc = home_folder + '/.bashrc'
    lines = (
        '\nexport WORKON_HOME=~/.virtualenvs',
        '\nexport VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3',
        '\nsource /usr/local/bin/virtualenvwrapper.sh',
    )
    for line in lines:
        append(bashrc, line)
    # NOTE(review): `run` starts a fresh shell per command, so this
    # `source` only affects that throwaway shell — confirm it is needed.
    run('source ~/.bashrc')
def fab_criar_crons(self):
    '''Insert the tasks defined in ../cron/cronconf into the server's
    /etc/crontab.

    For each entry in CRONS the crontab line is built from its schedule,
    user and command; lines are appended when missing, uncommented when
    disabled-but-wanted, and commented out when switched off.
    '''
    crontab_location = '/etc/crontab'
    with cd(env.code_root):
        if os.path.exists('cron'):
            from cron.cronconf import CRONS
            import re
            # Temporarily make /etc/crontab writable by the deploy user
            # (appends below run with use_sudo=False).
            sudo('chmod 646 ' + crontab_location)
            for cron in CRONS:
                # Project command run directly from the code root.
                if cron['comando_de_projeto'] and not cron[
                        'django_management']:
                    linha_cron = cron['tempo'] + ' ' + cron[
                        'usuario'] + ' ' + env.code_root + '/' + cron[
                        'comando']
                else:
                    # Django management command: invoked through python
                    # with the environment-specific settings module.
                    if cron['comando_de_projeto'] and cron[
                            'django_management']:
                        linha_cron = cron['tempo'] + ' ' + cron[
                            'usuario'] + ' /usr/bin/python ' + env.code_root + '/' + cron[
                            'comando'] + ' --settings=med_alliance.settings.' + env.ambiente
                    else:
                        # Plain system command, used verbatim.
                        linha_cron = cron['tempo'] + ' ' + cron[
                            'usuario'] + ' ' + cron['comando']
                if cron['ligado']:
                    # Enabled: add the line if absent, or re-enable a
                    # previously commented-out copy.
                    if not contains(crontab_location, re.escape(linha_cron)):
                        append(crontab_location, linha_cron, use_sudo=False)
                    else:
                        uncomment(crontab_location, re.escape(linha_cron))
                else:
                    # Disabled: comment the line out if present.
                    if contains(crontab_location, re.escape(linha_cron)):
                        comment(crontab_location, re.escape(linha_cron))
            # Restore standard permissions on /etc/crontab.
            sudo('chmod 644 ' + crontab_location)
def install(): """ Installs and configures ruby """ # update apt index update_index(quiet=False) # rvm requirements dependencies = [ 'build-essential', 'openssl', 'libreadline6', 'libreadline6-dev', 'curl', 'git-core', 'zlib1g', 'zlib1g-dev', 'libssl-dev', 'libyaml-dev', 'libsqlite3-dev', 'sqlite3', 'libxml2-dev', 'libxslt-dev', 'autoconf', 'libc6-dev', 'ncurses-dev', 'automake', 'libtool', 'bison', 'subversion', 'pkg-config' ] for dependency in dependencies: utils.deb.install(dependency) # rvm installation cmd = 'curl -L https://get.rvm.io | bash -s stable' run(cmd) # rvm path added depending on the shell rvm_path = 'source "$HOME/.rvm/scripts/rvm"' cmd = 'echo $SHELL' shell = run(cmd) if search('zsh', shell): if exists('.zshrc'): print(green('Adding rvm to .zshrc')) append('.zshrc', rvm_path) elif search('bash', shell): if exists('.bashrc'): print(green('Adding rvm to .bashrc')) append('.bashrc', rvm_path) else: print(red('Shell not supported')) # ruby installation with prefix(rvm_path): install_ruby_version()
def base_install(): """ Perform the basic install """ # Update the AMI completely sudo('yum --assumeyes --quiet update') # Install puppet and git sudo('yum --assumeyes --quiet install puppet git') # Clone our code run('git clone git://github.com/ICRAR/boinc-magphys.git') # Puppet and git should be installed by the python with cd('/home/ec2-user/boinc-magphys/machine-setup'): sudo('puppet boinc-magphys.pp') # Recommended version per http://boinc.berkeley.edu/download_all.php on 2012-07-10 run('svn co http://boinc.berkeley.edu/svn/trunk/boinc /home/ec2-user/boinc' ) # Setup the pythonpath append('/home/ec2-user/.bash_profile', [ '', 'PYTHONPATH=/home/ec2-user/boinc/py:/home/ec2-user/boinc-magphys/server/src', 'export PYTHONPATH' ]) # Setup the python run('wget http://pypi.python.org/packages/2.7/s/setuptools/setuptools-0.6c11-py2.7.egg' ) sudo('sh setuptools-0.6c11-py2.7.egg') run('rm setuptools-0.6c11-py2.7.egg') sudo('rm -f /usr/bin/easy_install') sudo('easy_install-2.7 pip') sudo('rm -f /usr/bin/pip') sudo('pip-2.7 install fabric') sudo('pip-2.7 install configobj')
def setup_swap():
    """Setup a swap file (if none exists) and configure vm.swappiness.

    Skips swap creation entirely when `swapon -s` already reports an
    active swap device; otherwise prompts for a size in MB (0 skips).
    """
    check_sudo()
    check_os()
    print_green('INFO: Setup SWAP...')
    t = sudo('swapon -s', quiet=True)
    # A numeric column in `swapon -s` output means swap is already active.
    if not re.search(r'\s\d+\s', t):
        swap_size = int(
            prompt(
                "Server doesn't have SWAP. Set size in MB to create SWAP. Keep 0 to skip.",
                default='0', validate=r'\d+'))
        if swap_size:
            swap_fn = '/swapfile'
            sudo('fallocate -l {size}M {sfn}'.format(size=swap_size, sfn=swap_fn))
            command_defrag = 'e4defrag {sfn}'.format(sfn=swap_fn)
            print_green(
                'Defragmenting swap file: {}...'.format(command_defrag))
            sudo(command_defrag, quiet=True)
            # Swap files must be root-owned and unreadable by others.
            sudo(
                'chown root:root {sfn} && chmod 600 {sfn}'.format(sfn=swap_fn))
            sudo('mkswap {sfn}'.format(sfn=swap_fn))
            sudo('swapon {sfn}'.format(sfn=swap_fn))
            # Persist across reboots.
            append('/etc/fstab',
                   '{sfn} swap swap defaults 0 0'.format(sfn=swap_fn),
                   use_sudo=True)
        # Tune how aggressively the kernel swaps (default prompt: 10).
        swappiness_size = int(
            prompt("Set vm.swappiness parameter to /etc/sysctl.conf",
                   default='10', validate=r'\d+'))
        append('/etc/sysctl.conf',
               'vm.swappiness={}'.format(swappiness_size), use_sudo=True)
        # Apply sysctl settings immediately.
        sudo('sysctl -p')
    print_green('INFO: Setup SWAP... OK')
def config_catchall(self):
    """Configure postfix catchall aliases for this Odoo server.

    Validates that the required modules are installed and the mailgate
    script exists, stores the catchall domain in the database, rewrites
    the virtual/local alias files and reloads postfix.
    """
    self.server_id.get_env()
    client = self.get_client()
    # Both modules must be installed before catchall can work.
    modules = ['auth_server_admin_passwd_passkey', 'mail']
    for module in modules:
        if client.modules(name=module, installed=True) is None:
            raise Warning(
                _("You can not configure catchall if module '%s' is not installed in the database") % (module))
    if not self.local_alias:
        raise Warning(
            _("You can not configure catchall if Local Alias is not set. Probably this is because Mailgate File was not found"))
    if not exists(self.mailgate_path, use_sudo=True):
        raise Warning(_("Mailgate file was not found on mailgate path '%s' base path found for mail module") % (
            self.mailgate_path))
    # Configure domain_alias on the database
    client.model('ir.config_parameter').set_param(
        "mail.catchall.domain", self.domain_alias or '')
    # Clean stale entries for the domain, then append virtual_alias
    if exists(self.server_id.virtual_alias_path, use_sudo=True):
        sed(
            self.server_id.virtual_alias_path,
            '@%s.*' % self.domain_alias, '', use_sudo=True, backup='.bak')
    append(
        self.server_id.virtual_alias_path,
        self.virtual_alias, use_sudo=True, partial=True)
    # Clean stale entries, then append local_alias
    if exists(self.server_id.local_alias_path, use_sudo=True):
        sed(
            self.server_id.local_alias_path,
            '%s.*' % self.domain_alias, '', use_sudo=True, backup='.bak')
    append(
        self.server_id.local_alias_path,
        self.local_alias, use_sudo=True)
    # Rebuild the postfix maps and restart so changes take effect.
    sudo('postmap /etc/postfix/virtual_aliases')
    sudo('newaliases')
    sudo('/etc/init.d/postfix restart')
def setup_backup_client():
    """Sets up target host to do automatic daily Apache and MySQL backup.

    Fixes:
    - The two crontab `append(...)` calls had their arguments reversed
      (fabric's signature is ``append(filename, text)``), so the cron
      lines were never added to /etc/crontab.
    - ``sudo("sudo chmod …")`` used a redundant inner sudo and a
      relative path; it now targets /srv/backup-scripts directly.
    """
    prompt('Database user for mysql:', 'db_user')
    env.db_pass = getpass('Database password for mysql:')
    sudo("mkdir -p /srv/backup/data")
    sudo("mkdir -p /srv/backup/periodic")
    sudo("mkdir -p /srv/backup-scripts")
    sudo("chown -R ui-backup.ui-backup /srv/backup")
    sudo("chmod -R a+rx /srv/backup-scripts")
    sudo("ln -s /var/www/ /srv/backup/data/apache/www")
    # Upload necessary templates and backup scripts
    upload_template('backup/backup.sh.tpl', env.HOME_PATH, context={
        'db_user': env.db_user,
        'db_pass': env.db_pass,
    })
    put('backup/automysqlbackup-ui.sh', env.HOME_PATH)
    put('backup/br-apache.sh', env.HOME_PATH)
    put('backup/last-full/userinspired-full-date', env.HOME_PATH)
    put('backup/periodic.sh', env.HOME_PATH)
    sudo("mv automysqlbackup-ui.sh /srv/backup-scripts/")
    sudo("mv br-apache.sh /srv/backup-scripts/")
    sudo("mv backup.sh.tpl /srv/backup-scripts/backup.sh")
    sudo("mv periodic.sh /srv/backup-scripts/")
    sudo("mkdir -p /srv/backup-scripts/last-full")
    sudo("mv userinspired-full-date /srv/backup-scripts/last-full")
    sudo("chmod +x /srv/backup-scripts/*.sh")
    # Schedule the daily backup jobs (filename first, then the line).
    append('/etc/crontab',
           '00 1 * * * ui-backup /srv/backup-scripts/backup.sh',
           use_sudo=True)
    append('/etc/crontab',
           '00 2 * * * ui-backup /srv/backup-scripts/periodic.sh',
           use_sudo=True)
def _update_settings(source_folder, site_name):
    """Adjust deployed Django settings for production.

    Fix: the DEBUG sed pattern was "DEBUG = TRUE", which never matches
    Django's actual "DEBUG = True" line, so DEBUG silently stayed on in
    production. The pattern now matches the real settings text.
    """
    settings_path = source_folder + '/superlists2017/settings.py'
    sed(settings_path, "DEBUG = True", "DEBUG = False")
    sed(settings_path, 'DOMAIN = "localhost"', 'DOMAIN = "%s"' % (site_name, ))
    secret_key_file = source_folder + '/superlists2017/secret_key.py'
    if not exists(secret_key_file):
        # Generated once per server; later deploys reuse the file.
        print('doesnt exist')
        chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
        key = ''.join(random.SystemRandom().choice(chars) for _ in range(50))
        append(secret_key_file, "SECRET_KEY = '%s'" % (key, ))
    append(settings_path, '\nfrom .secret_key import SECRET_KEY')
def install_package(self, pkg):
    """Installs package on the host using apt-get install bypassing
    authentication. This method should be used for testing package
    installation before using push_to_repo.

    The .deb is rsynced to a scratch directory on the host, which is
    turned into a local flat apt repository (file:// source +
    Packages.gz) so apt-get can resolve and install it by exact
    name=version.
    """
    base_dir, src_dir, build_dir = debian._setup(clean=False)
    pkg_dir = self.mkdir(base_dir+"/pkg_dir")
    rsync(pkg,pkg_dir)
    with cd(pkg_dir):
        # Register the scratch dir as a local apt source and index it.
        print green(append("/etc/apt/sources.list", "deb file://{0} /".format(pkg_dir)))
        print green(run("dpkg-scanpackages . /dev/null | gzip -c -9 > Packages.gz"))
        # Extract the package's own name/version from its control data.
        pkg_name = run("dpkg -f {0} | grep '^Package: ' | sed -e 's/Package: //'".format(pkg))
        pkg_version = run("dpkg -f {0} | grep '^Version: ' | sed -e 's/Version: //'".format(pkg))
        # Unauthenticated install: the local repo is unsigned by design.
        print green(run("apt-get update -qq"))
        print green(run("apt-get install {0}={1} -qq --allow-unauthenticated".format(pkg_name,pkg_version)))
def install_jenkins():
    """Install Jenkins from the jenkins-ci.org apt repo, plus the CI
    toolchain (git, browsers, xvfb, node/npm, phantomjs).
    """
    # NOTE(review): each fabric call runs in a fresh shell, so this
    # export does not affect the apt-get commands below — confirm
    # whether it should be passed per-command instead.
    sudo('export DEBIAN_FRONTEND=noninteractive')
    # Map the hostname to localhost so local services resolve it.
    hostname = run("cat /etc/hostname")
    append("/etc/hosts", "127.0.0.1\t%s" % hostname, use_sudo=True)
    # Add the Jenkins signing key and apt source, then install.
    run("wget -q http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key")
    sudo("apt-key add jenkins-ci.org.key")
    append("/etc/apt/sources.list",
           "deb http://pkg.jenkins-ci.org/debian binary/", use_sudo=True)
    sudo("apt-get update -q -y")
    sudo("apt-get install -q -y jenkins")
    sudo("apt-get install -q -y git iceweasel python3 python-virtualenv xvfb")
    # For npm: build node from source, then install npm and phantomjs.
    sudo("apt-get install -q -y curl build-essential openssl libssl-dev")
    run("git clone https://github.com/joyent/node.git")
    with cd("node"):
        run("./configure")
        run("make")
        sudo("make install")
    run("curl -L -O https://npmjs.org/install.sh")
    run("chmod +x install.sh")
    sudo("./install.sh")
    sudo("npm install -g phantomjs")
def configure_tomcat():
    """Reconfigure tomcat7 to serve Solr on port 8983 under user 'kat'.

    Stops the service, moves the HTTP connector from 8080 to 8983, adds
    a manager-gui user, switches the run-as user/group to kat, points
    CATALINA_OPTS at the Solr home, then restarts.
    """
    sudo('/etc/init.d/tomcat7 stop')
    # Move the HTTP connector from the default 8080 to 8983 (Solr's port).
    files.sed('/etc/tomcat7/server.xml',
              '<Connector port="8080" protocol="HTTP/1.1"',
              '<Connector port="8983" protocol="HTTP/1.1"', use_sudo=True)
    # Grant access to the Tomcat manager web UI.
    files.append(
        '/etc/tomcat7/tomcat-users.xml',
        '<role rolename="manager-gui"/><user username="******" password="******" roles="manager-gui"/>',
        use_sudo=True)
    # Run tomcat as the kat user/group instead of the default account.
    files.sed('/etc/default/tomcat7', 'TOMCAT7_USER=tomcat7',
              'TOMCAT7_USER=kat', use_sudo=True)
    files.sed('/etc/default/tomcat7', 'TOMCAT7_GROUP=tomcat7',
              'TOMCAT7_GROUP=kat', use_sudo=True)
    # Tell Solr where its home directory lives.
    files.append(
        '/etc/default/tomcat7',
        'CATALINA_OPTS="-Dsolr.solr.home=/var/kat/archive/catalogs/solr"',
        use_sudo=True)
    sudo('/etc/init.d/tomcat7 start')
def add_bond_cfg_to_nic(self,nic_name=[]): """ Add Bonding cfg and related cfg params to NIC's ntk-scripts nic_name = pass it as a list of nics which are taking part in bonding """ env.host_string = self.host_ip env.user = self.user env.pwd = self.pwd if nic_name == []: print 'ERROR: List of NICs is empty' sys.exit(1) for nic in nic_name: path='/etc/sysconfig/network-scripts/ifcfg-%s' %(nic) append_new_cfg_lines = ['MASTER=bond0','SLAVE=yes'] over_write_cfg_lines = [ "sed -i 's/DEVICE.*/DEVICE=%s/' %s" %(nic,path), "sed -i 's/BOOTPROTO.*/BOOTPROTO=none/' %s" %(path), "sed -i 's/ONBOOT.*/ONBOOT=yes/' %s" %(path), "sed -i 's/NM_CONTROLLED.*/NM_CONTROLLED=no/' %s" %(path) ] for cmd in over_write_cfg_lines: run(cmd) files.append(path,append_new_cfg_lines)
def add_repos(): """Clean and Add necessary repositories and updates""" # Install Stackops and Ubuntu cloud repos keys sudo('wget -O - http://repos.stackops.net/keys/stackopskey_pub.gpg ' '| apt-key add -') with settings(warn_only=True): sudo('echo "' '" > /etc/apt/sources.list.d/stackops.list') sudo('sed -i /precise-updates/d /etc/apt/sources.list') sudo('sed -i /precise-security/d /etc/apt/sources.list') sudo('sed -i /archive.ubuntu.com/d /etc/apt/sources.list') for repo in REPOS: files.append('/etc/apt/sources.list', repo, use_sudo=True) sudo('apt-get -y update') # Install Ubuntu cloud repos keys package_ensure('ubuntu-cloud-keyring') sudo('apt-get -y update')
def install_application():
    """Install the socnet application: init script, application user
    with home dir, and the deploy SSH credentials.
    """
    pub_key = _pub_key()
    # Replace any previously installed init script.
    if exists('/etc/init.d/socnet'):
        run('rm /etc/init.d/socnet')
    put('etc/init.d/socnet', '/etc/init.d/socnet', mode=0755)
    if not exists(APPLICATION_DIR):
        run('mkdir -p /var/socnet/')
        # Best-effort removal of a stale account before recreating it.
        try:
            run('userdel -rf %s' % (APPLICATION_USER, ))
        except:
            pass
        run('useradd %s --home-dir %s --create-home --shell /bin/bash'
            % (APPLICATION_USER, APPLICATION_DIR))
    with cd(APPLICATION_DIR):
        if not exists('.ssh'):
            run('mkdir .ssh')
            run('chmod 700 .ssh')
        put('ssh/*', '%s/.ssh' % APPLICATION_DIR, mode=0600)
        # NOTE(review): fabric's append signature is (filename, text) —
        # these arguments look reversed; confirm pub_key is the key text.
        append(pub_key, '.ssh/authorized_keys')
        run('chown -R %s:%s .ssh' % (APPLICATION_USER, APPLICATION_USER))