def configure_nginx():
    # Do this for new servers only
    run("sudo /etc/init.d/nginx start")
    print green("Copying nginx config virtual host file for ajibika.org to the sites-available directory")
    with settings(warn_only=True):
        if file_exists("/etc/nginx/sites-available/www.ajibika.org"):
            run("sudo rm /etc/nginx/sites-enabled/www.ajibika.org")
        result = put("conf/www.ajibika.org", "/etc/nginx/sites-available/",
                     use_sudo=True)
        if result.failed and not confirm("Unable to copy www.ajibika.org to the sites-available dir. Continue anyway?"):
            abort("Aborting at user request.")
    print green("conf/www.ajibika.org has been copied")

    print red("Removing old nginx configs")
    if file_exists("/etc/nginx/sites-enabled/default"):
        result = run("sudo rm /etc/nginx/sites-enabled/default")
        if result.failed and not confirm("Unable to remove old nginx configs. Continue anyway?"):
            abort("Aborting at user request.")

    print magenta("Now symlinking the ajibika virtual host file into sites-enabled")
    if not file_exists("/etc/nginx/sites-enabled/www.ajibika.org"):
        with settings(warn_only=True):
            result = run("sudo ln -s /etc/nginx/sites-available/www.ajibika.org /etc/nginx/sites-enabled/www.ajibika.org")
            if result.failed and not confirm("Unable to symlink the ajibika virtual host file into sites-enabled. Continue anyway?"):
                abort("Aborting at user request.")

    print "sudo reload nginx"
    run("sudo /etc/init.d/nginx reload")
def _configure_web_runner_web():
    venv_webrunner_web = _get_venv_path(VENV_WEB_RUNNER_WEB)
    venv_webrunner_web_activate = '%s%sbin%sactivate' % (
        venv_webrunner_web, os.sep, os.sep)
    repo_path = _get_repo_path()
    settings_prod = repo_path + '/web_runner_web/web_runner_web/' \
                                'settings.production.py'
    settings_link = repo_path + '/web_runner_web/web_runner_web/settings.py'

    # Create the settings.py
    if not cuisine.file_exists(settings_link):
        run('ln -s %s %s' % (settings_prod, settings_link))

    # Create the Django DB and Django users
    django_db = repo_path + '/web_runner_web/db.sqlite3'
    if not cuisine.file_exists(django_db):
        with virtualenv(VENV_WEB_RUNNER_WEB):
            run("cd %s/web_runner_web && ./manage.py syncdb --noinput"
                % repo_path)
        run('tmux new-window -k -t webrunner:4 -n django_config')
        run("tmux send-keys -t webrunner:4 'source %s' C-m"
            % venv_webrunner_web_activate)
        run("tmux send-keys -t webrunner:4 'cd %s' C-m" % repo_path)
        run("tmux send-keys -t webrunner:4 'cd web_runner_web' C-m")
        run("tmux send-keys -t webrunner:4 './manage.py shell' C-m")
        run("tmux send-keys -t webrunner:4 "
            "'from django.contrib.auth.models import User' C-m")
        run("tmux send-keys -t webrunner:4 "
            "'user = User.objects.create_user(username=\"admin\", password=\"Content\")' C-m")
        run("tmux send-keys -t webrunner:4 'exit()' C-m")
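
# The virtualenv() context manager used above is defined elsewhere in the
# project. A minimal sketch of the usual Fabric idiom, assuming it reuses the
# same _get_venv_path() helper as _configure_web_runner_web():
from contextlib import contextmanager
from fabric.api import prefix

@contextmanager
def virtualenv(venv_name):
    # Prefix every run()/sudo() inside the block with the venv activation.
    with prefix('source %s/bin/activate' % _get_venv_path(venv_name)):
        yield
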
def disable_services():
    with hide('output', 'running'):
        with settings(warn_only=True):
            for service in ['devpanel-taskd', 'devpanel-dbmgr']:
                if cuisine.file_exists('/etc/init/%s.conf' % service):
                    run('echo mv /etc/init/%s.conf /etc/init/%s.conf.disabled'
                        % (service, service))
                if cuisine.file_exists('/etc/init.d/%s' % service):
                    run('echo update-rc.d %s disabled' % service)
def _logrotate():
    date = run('date "+%y%m%d-%H%M%S"')
    if cuisine.file_exists('/var/log/nginx/access.log'):
        sudo('gzip -c /var/log/nginx/access.log > /var/log/nginx/access-%s.gz'
             % date)
        sudo(': > /var/log/nginx/access.log')
    if cuisine.file_exists('/var/log/nginx/error.log'):
        sudo('gzip -c /var/log/nginx/error.log > /var/log/nginx/error-%s.gz'
             % date)
        sudo(': > /var/log/nginx/error.log')
def cpanm_bin_installed(home='/tmp'):
    cuisine.select_package(option='yum')
    cuisine.package_ensure('perl-devel')
    binpath = '%s/.deploy/bin' % home
    cpanm = '%s/cpanm' % binpath
    if not cuisine.file_exists(cpanm):
        cuisine.dir_ensure(binpath, recursive=True, mode=755)
        cuisine.package_ensure('curl')
        run('curl -L http://cpanmin.us > %s' % cpanm)
        run('chmod 755 %s' % cpanm)
    cuisine.file_exists(cpanm)
    return cpanm
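
# Illustrative use of the helper above; the home directory and the Plack
# module are examples, not from the original code:
def install_plack():
    cpanm = cpanm_bin_installed(home='/home/deploy')
    run('%s --notest Plack' % cpanm)
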
def deploy():
    """ Push code, sync, migrate, generate media, restart """
    assert_git_valid()

    with cd(env.root_dir):
        with prefix(env.activate):
            # clone the latest repo
            if file_exists("_latest"):
                abort("deploy halted: remove dead _latest clone")
            run("git clone -b %(branch)s %(repo)s releases/_latest" % env)

            # link settings_local.py
            # TODO: handle template
            file_link("/srv/%(app)s/shared/settings_local.py" % env,
                      "/srv/%(app)s/releases/_latest/%(app)s/settings_local.py" % env)

            # get the latest release
            env.latest_release = run(
                "git --git-dir=releases/_latest/.git rev-parse origin/%(branch)s" % env)

            # pip install requirements
            output = run("pip install -r releases/_latest/requirements.txt")
            if output.failed:
                abort('deploy halted: pip install failed!')

            # migrate
            output = run("python releases/_latest/%(app)s/manage.py migrate" % env)
            if output.failed:
                abort('deploy halted: migration failed!')

            # collectstatic
            file_link("/srv/%(app)s/shared/%(static_root)s" % env,
                      "releases/_latest/%(app)s/%(static_root)s" % env)
            output = run(
                'python releases/_latest/%(app)s/manage.py collectstatic --noinput -i "*.pyc"' % env)
            if output.failed:
                abort('deploy halted: collectstatic failed!')

            # swap symlinks
            run("mv releases/_latest releases/%(latest_release)s" % env)
            if file_exists("current"):
                if file_exists("previous"):
                    run("rm previous")
                run("mv current previous")
            # link current to latest release
            file_link("/srv/%(app)s/releases/%(latest_release)s" % env, "current")

            # restart supervisord
            # TODO: there must be a better way
            #sudo('supervisorctl status %(app)s | sed "s/.*[pid ]\([0-9]\+\)\,.*/\\1/" | xargs kill -HUP' % env)
            # restart gunicorn
            sudo('/etc/init.d/%(app)s restart' % env)
def python():
    cuisine.package_ensure([
        'python', 'python-setuptools', 'python-dev', 'python-mysqldb'
    ])
    if not cuisine.file_exists('/usr/local/bin/pip'):
        with cd('/tmp'):
            cuisine.run('curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py')
            cuisine.run('python get-pip.py')
    if not cuisine.file_exists('/usr/local/bin/virtualenv'):
        cuisine.run('easy_install -UZ virtualenv')
def python_gunicorn_stop(pid_file_path):
    if file_exists(pid_file_path):
        out = _run('ps -p $(cat %(pid_file_path)s)|grep gunicorn'
                   % dict(pid_file_path=pid_file_path))
        if out.stdout and out.succeeded:
            _run('kill $(cat %(pid_file_path)s)'
                 % dict(pid_file_path=pid_file_path), sudo=True)
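
# _run() above is a project helper that is not part of this collection. A
# minimal sketch, assuming it simply dispatches to Fabric's run()/sudo() and
# returns the result object whose .stdout/.succeeded are inspected here:
from fabric.api import run as fab_run, sudo as fab_sudo

def _run(command, sudo=False):
    # Run a shell command remotely, optionally via sudo.
    return fab_sudo(command) if sudo else fab_run(command)
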
def stage9_container_openstack_swift_automate_all_the_startups():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    run("""
chown -R swift:swift /srv/node
chown -R swift:swift /etc/swift
chown -R swift:swift /var/cache/swift
chown -R swift:swift /var/run/swift
""")

    run("""
service memcached restart
""")

    run("""
swift-init all stop || true

service memcached restart

echo stats | nc localhost 11211 | grep 'STAT uptime'

swift-init all start
""")
def install(force=False):
    """Install python"""
    version = get_config()['version']
    install_dir = os.path.join(_INSTALL_DIR, 'python', version)
    python_bin = os.path.join(install_dir, 'bin', 'python')
    if cuisine.file_exists(python_bin):
        if not force:
            fab.puts("Python {0} found, skipping installation".format(version))
            return
        else:
            fab.puts("Reinstalling Python {0}".format(version))
    cuisine.package_install(['build-essential', 'libcurl4-openssl-dev'])
    src_dir = fab.run('mktemp -d')
    with fab.cd(src_dir):
        fab.puts("Downloading python {0}".format(version))
        fab.run("wget -q '%s' -O - | tar xz"
                % PYTHON_DOWNLOAD_URL.format(version=version))
        with fab.cd('Python-{0}'.format(version)):
            fab.puts("Installing python {0}".format(version))
            fab.run("./configure --prefix=%s" % install_dir)
            fab.run("make")
            fab.sudo('make install')
    fab.run('rm -rf {0}'.format(src_dir))
def install_diff_highlight():
    print white('--- install diff highlight ---', bold=True)
    if not file_exists('/usr/local/bin/diff-highlight'):
        run('wget https://raw.githubusercontent.com/git/git/master/contrib/diff-highlight/diff-highlight')
        with settings(mode_sudo()):
            run('chmod +x diff-highlight')
            run('mv diff-highlight /usr/local/bin/diff-highlight')
def install_python(version, force=False):
    """Install python"""
    package_ensure('build-essential')
    package_ensure('libcurl4-openssl-dev')
    install_dir = os.path.join(_INSTALL_DIR, 'python', version)
    python_bin = os.path.join(install_dir, 'bin', 'python')
    if file_exists(python_bin):
        if not force:
            puts("Python {0} found, skipping installation".format(version))
            return
        else:
            puts("Reinstalling Python {0}".format(version))
    src_dir = run('mktemp -d')
    with cd(src_dir):
        puts("Downloading python {0}".format(version))
        run("wget -q '%s' -O - | tar xz"
            % PYTHON_DOWNLOAD_URL.format(version=version))
        with cd('Python-{0}'.format(version)):
            puts("Installing python {0}".format(version))
            run("./configure --prefix=%s" % install_dir)
            run("make")
            sudo('make install')
    run('rm -rf {0}'.format(src_dir))
def provision_rabbitmq(admin_password):
    append("/etc/apt/sources.list.d/rabbitmq.list",
           "deb http://www.rabbitmq.com/debian/ testing main",
           use_sudo=True)
    if not file_exists("/usr/sbin/rabbitmq-server"):
        sudo("wget http://www.rabbitmq.com/rabbitmq-signing-key-public.asc")
        sudo("apt-key add rabbitmq-signing-key-public.asc")
        map(package_ensure_apt, ["rabbitmq-server", "rsync"])
        dir_ensure("/etc/rabbitmq/rabbitmq.conf.d")
        put("./conf/bunny/bunny.conf", "/etc/rabbitmq/rabbitmq.conf.d/",
            use_sudo=True)
        sudo("chown -R rabbitmq.rabbitmq /srv/rabbitmq")
        dir_ensure("/srv/rabbitmq/log", owner="rabbitmq", group="rabbitmq")
        sudo("rm -rf /var/lib/rabbitmq")
        sudo("rm -rf /var/log/rabbitmq")
        with mode_sudo():
            file_link("/srv/rabbitmq", "/var/lib/rabbitmq",
                      owner="rabbitmq", group="rabbitmq")
            file_link("/srv/rabbitmq/log", "/var/log/rabbitmq",
                      owner="rabbitmq", group="rabbitmq")
        sudo("service rabbitmq-server start")
        sudo("rabbitmq-plugins enable rabbitmq_management")
        sudo("rabbitmqctl add_user admin " + admin_password)
        sudo("rabbitmqctl set_user_tags admin administrator")
        sudo('rabbitmqctl set_permissions -p / admin ".*" ".*" ".*"')
        sudo("rabbitmqctl delete_user guest")
        sudo("service rabbitmq-server restart")
    else:
        print "skipped install, already have /usr/sbin/rabbitmq-server"
def stage7_midonet_tunnelzone_members():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    cuisine.package_ensure("expect")

    for container_role in ['container_midonet_gateway',
                           'container_openstack_compute',
                           'container_openstack_neutron']:
        if container_role in metadata.roles:
            for container in metadata.containers:
                if container in metadata.roles[container_role]:
                    puts(green("adding container %s as member to tunnel zones" % container))
                    add_host_to_tunnel_zone(
                        metadata.config["debug"],
                        container,
                        metadata.containers[container]["ip"])

    for physical_role in ['physical_midonet_gateway',
                          'physical_openstack_compute']:
        if physical_role in metadata.roles:
            for server in metadata.servers:
                if server in metadata.roles[physical_role]:
                    puts(green("adding server %s as member to tunnel zones" % server))
                    #
                    # tinc can only work with MTU 1500
                    # we could use the approach from http://lartc.org/howto/lartc.cookbook.mtu-mss.html
                    # but instead we will disable rp_filter and use the physical interface ip
                    #
                    # server_ip = "%s.%s" % (metadata.config["vpn_base"], metadata.config["idx"][server])
                    server_ip = metadata.servers[server]["ip"]
                    add_host_to_tunnel_zone(
                        metadata.config["debug"], server, server_ip)

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def setup_packages():
    """
    Installs basic Raspbian package requirements.
    """
    puts(green("Installing packages"))
    package_update()
    with hide("running"):
        package_ensure("git-core")
        package_ensure("mpc")
        package_ensure("mpd")
        # Sometimes I use screen, sometimes I use tmux ...
        package_ensure("screen")
        package_ensure("tmux")
        # ... but I always use vim.
        package_ensure("vim")
        package_ensure("python-pip")
        package_ensure("ack-grep")
        ack_filename = "{}/ack".format(LOCAL_PREFIX)
        if file_exists(ack_filename):
            sudo("rm {}".format(ack_filename))
        sudo("ln -s /usr/bin/ack-grep {}".format(ack_filename))
        install_binary_from_URL(
            "https://raw.github.com/sjl/friendly-find/master/ffind"
        )
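
# install_binary_from_URL() above is a helper defined elsewhere. A minimal
# sketch of what it is assumed to do, reusing the same LOCAL_PREFIX install
# prefix used for the ack symlink (the implementation is a guess):
def install_binary_from_URL(url):
    # Download the file into LOCAL_PREFIX and mark it executable.
    filename = "{}/{}".format(LOCAL_PREFIX, url.rsplit("/", 1)[-1])
    sudo("wget -q {} -O {}".format(url, filename))
    sudo("chmod +x {}".format(filename))
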
def ioncube():
    if not cuisine.file_exists('/etc/php5/conf.d/000-ioncube_loader.ini'):
        with cd('/tmp'):
            cuisine.run('wget http://downloads2.ioncube.com/loader_downloads/ioncube_loaders_lin_x86-64.tar.gz')
            cuisine.run('tar --directory "/usr/local/" -xzf "/tmp/ioncube_loaders_lin_x86-64.tar.gz"')
            cuisine.run("echo \"zend_extension=/usr/local/ioncube/ioncube_loader_lin_$(command php --version | command head -n 1 | command cut -c 5-7).so\" | command tee /etc/php5/conf.d/000-ioncube_loader.ini")
def provision_clojure():
    """For getting a machine ready to run Clojure"""
    append("/etc/apt/sources.list.d/oracle_java.list",
           "deb http://www.duinsoft.nl/pkg debs all",
           use_sudo=True)
    if not file_exists("/usr/bin/java"):
        sudo("apt-key adv --keyserver keys.gnupg.net --recv-keys 5CB26B26")
        map(package_ensure_apt, ["update-sun-jre", "rsync", "unzip"])
def php():
    cuisine.package_ensure([
        'php5', 'php5-cli', 'php5-curl', 'php5-dev', 'php5-gd', 'php5-imap',
        'php5-memcached', 'php5-mcrypt', 'php5-mysqlnd', 'php5-sqlite',
        'php5-tidy', 'php5-xdebug', 'php5-xmlrpc', 'php5-xsl'
    ])
    if not cuisine.file_exists('/etc/php5/conf.d/custom.ini'):
        custom = ('date.timezone = "Europe/Paris"\n'
                  'error_reporting = E_ALL\n'
                  'display_errors = On\n'
                  'display_startup_errors = On\n'
                  'short_open_tag = Off')
        cuisine.file_write('/etc/php5/conf.d/custom.ini', custom,
                           644, 'root', 'root')
    if not package_installed('php-pear'):
        cuisine.package_ensure('php-pear')
        cuisine.run('pear channel-update pear.php.net')
        cuisine.run('pear upgrade pear')
    if package_installed('php5-suhosin'):
        cuisine.run('apt-get purge php5-suhosin -y')
def install_setuptools(force=False):
    """Install setuptools"""
    py_version = get_config()['version']
    easy_install_bin = _python_bin_path(py_version, 'easy_install')
    if cuisine.file_exists(easy_install_bin):
        if not force:
            fab.puts("easy_install for python {0} found, skipping installation".format(py_version))
            return
        else:
            fab.puts("Reinstalling easy_install for python {0}".format(py_version))
    major, minor = py_version.split('.')[0:2]
    version = "{0}.{1}".format(major, minor)
    python_bin = _python_bin_path(py_version)
    src_dir = fab.run('mktemp -d')
    with fab.cd(src_dir):
        fab.puts("Downloading setuptools for python {0}".format(version))
        download_url = SETUPTOOLS_DOWNLOAD_URL.format(py_version=version)
        fab.run("wget -q '{0}' -O - | tar xz".format(download_url))
        with fab.cd('setuptools-*'):
            fab.puts("Installing setuptools for python {0}".format(version))
            fab.sudo("{0} setup.py install".format(python_bin))
    fab.sudo('rm -rf {0}'.format(src_dir))
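
# _python_bin_path() is shared by the setuptools/pip helpers here but defined
# elsewhere. A minimal sketch, assuming the layout used by install() and
# install_python() above (_INSTALL_DIR/python/<version>/bin/<name>):
import os

def _python_bin_path(py_version, name='python'):
    # Path to a binary inside the versioned Python install prefix.
    return os.path.join(_INSTALL_DIR, 'python', py_version, 'bin', name)
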
def setup_i2c():
    if cuisine.file_exists('/etc/modprobe.d/raspi-blacklist.conf'):
        sudo('sed -i -e \'s/^blacklist i2c-bcm2708/# blacklist i2c-bcm2708/g\' '
             + '/etc/modprobe.d/raspi-blacklist.conf')
    else:
        # RASPBIAN Release 2015-01-31 or later
        boot_conf = cuisine.text_ensure_line(
            cuisine.file_read('/boot/config.txt'),
            'dtparam=i2c_arm=on'
        )
        cuisine.file_write(
            location='/boot/config.txt',
            content=boot_conf,
            mode='755',
            sudo=True
        )
    modules_conf = cuisine.text_ensure_line(
        cuisine.file_read('/etc/modules'),
        'i2c-dev'
    )
    cuisine.file_write(
        location='/etc/modules',
        content=modules_conf,
        mode='644',
        sudo=True
    )
    # Repeated Start Condition
    cuisine.file_write(
        location='/etc/modprobe.d/i2c.conf',
        content='options i2c_bcm2708 combined=1\n',
        mode='644',
        sudo=True
    )
def stage7_midonet_fakeuplinks():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    # provider router has been created now. we can set up the static routing logic.
    # note that we might also change this role loop to include compute nodes
    # (for simulating a similar approach like the HP DVR off-ramping directly from the compute nodes)
    for role in ['container_midonet_gateway']:
        if role in metadata.roles:
            for container in metadata.containers:
                if container in metadata.roles[role]:
                    puts(green("setting up fakeuplink provider router leg for container %s" % container))

                    physical_ip_idx = int(re.sub(r"\D", "", container))
                    overlay_ip_idx = 255 - physical_ip_idx

                    #
                    # This logic is the complementary logic to what happens on the midonet gateways
                    # when the veth pair, the fakeuplink bridge and the eth0 SNAT are set up.
                    # We might some day change this to a proper BGP peer (which will be in another
                    # container or on a different host of course).
                    #
                    run("""
if [[ "%s" == "True" ]] ; then set -x; fi

CONTAINER_NAME="%s"
FAKEUPLINK_VETH1_IP="%s"
FAKEUPLINK_NETWORK="%s.0/24"
FAKEUPLINK_VETH0_IP="%s"

/usr/bin/expect<<EOF
set timeout 10
spawn midonet-cli

expect "midonet> " { send "cleart\r" }
expect "midonet> " { send "router list name 'MidoNet Provider Router'\r" }
expect "midonet> " { send "router router0 add port address ${FAKEUPLINK_VETH1_IP} net ${FAKEUPLINK_NETWORK}\r" }
expect "midonet> " { send "port list device router0 address ${FAKEUPLINK_VETH1_IP}\r" }
expect "midonet> " { send "host list name ${CONTAINER_NAME}\r" }
expect "midonet> " { send "host host0 add binding port router router0 port port0 interface veth1\r" }
expect "midonet> " { send "router router0 add route type normal weight 0 src 0.0.0.0/0 dst 0.0.0.0/0 gw ${FAKEUPLINK_VETH0_IP} port port0\r" }
expect "midonet> " { send "quit\r" }

EOF
""" % (
                        metadata.config["debug"],
                        container,
                        "%s.%s" % (metadata.config["fake_transfer_net"], str(overlay_ip_idx)),
                        metadata.config["fake_transfer_net"],
                        "%s.%s" % (metadata.config["fake_transfer_net"], str(physical_ip_idx))
                    ))

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def stage7_container_midonet_api():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    zk = []
    for zkhost in sorted(metadata.roles['container_zookeeper']):
        zk.append("{'ip' => '%s', 'port' => '2181'}" % metadata.containers[zkhost]['ip'])

    args = {}
    args['zk_servers'] = "[%s]" % ",".join(zk)
    args['keystone_auth'] = "true"
    args['vtep'] = "true"

    #
    # slice and dice the password cache so we can access it in python
    #
    passwords = {}
    with open(os.environ["PASSWORDCACHE"]) as passwordcache:
        for line in passwordcache:
            name, var = line.partition("=")[::2]
            passwords[name] = str(var).rstrip('\n')

    #
    # this is supposed to be the outer ip, not the container ip, remember HATEOAS
    #
    args['api_ip'] = "'%s'" % metadata.servers[metadata.roles["midonet_api"][0]]["ip"]
    args['api_port'] = "'8081'"
    args['keystone_host'] = "'%s'" % metadata.containers[metadata.roles["container_openstack_keystone"][0]]["ip"]
    args['keystone_port'] = "'35357'"
    args['keystone_admin_token'] = "'%s'" % passwords["export ADMIN_TOKEN"]
    args['keystone_tenant_name'] = "'admin'"

    Puppet.apply('midonet::midonet_api', args, metadata)

    #
    # in case mock auth was installed:
    #
    run("""
sed -i 's,org.midonet.api.auth.MockAuthService,org.midonet.cluster.auth.MockAuthService,g;' /usr/share/midonet-api/WEB-INF/web.xml
""")

    #
    # wait for the api to come up
    #
    puts(green("please wait for midonet-api to come up, this can take a long time!"))

    run("""
wget -SO- -- http://%s:8081/midonet-api/; echo
""" % metadata.servers[metadata.roles["midonet_api"][0]]["ip"])

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def stage7_container_midonet_gateway_setup():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    server_idx = int(re.sub(r"\D", "", env.host_string))
    overlay_ip_idx = 255 - server_idx

    run("""
if [[ "%s" == "True" ]] ; then set -x; fi

#
# fakeuplink logic for midonet gateways without binding a dedicated virtual edge NIC
#
# this is recommended for silly toy installations only - do not do this in production!
#
# The idea with the veth-pairs was originally introduced and explained to me by Daniel Mellado.
#
# Thanks a lot, Daniel!
#

# this will go into the host-side of the veth pair
PHYSICAL_IP="%s"

# this will be bound to the provider router
OVERLAY_BINDING_IP="%s"

FIP_BASE="%s"

ip a | grep veth1 || \
    ip link add type veth

# these two interfaces are basically acting as a virtual RJ45 cross-over patch cable
ifconfig veth0 up
ifconfig veth1 up

# this bridge brings us to the linux kernel routing
brctl addbr fakeuplink

# this is the physical ip we use for routing (SNATing inside linux)
ifconfig fakeuplink "${PHYSICAL_IP}/24" up

# this is the physical plug of the veth-pair
brctl addif fakeuplink veth0
# veth1 will be used by midonet

# change this to the ext range for more authentic testing
ip route add ${FIP_BASE}.0/24 via "${OVERLAY_BINDING_IP}"

# enable routing
echo 1 > /proc/sys/net/ipv4/ip_forward
""" % (
        metadata.config["debug"],
        "%s.%s" % (metadata.config["fake_transfer_net"], str(server_idx)),
        "%s.%s" % (metadata.config["fake_transfer_net"], str(overlay_ip_idx)),
        metadata.config["fip_base"]))

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def _setup_yum_repository():
    cuisine.package_ensure('epel-release')
    if not cuisine.file_exists('/etc/yum.repos.d/remi.repo'):
        sudo('yum install https://rpms.remirepo.net/enterprise/remi-release-7.rpm -y')
    if not cuisine.file_exists('/etc/yum.repos.d/percona-release.repo'):
        sudo('yum install http://www.percona.com/downloads/percona-release/redhat/0.1-6/percona-release-0.1-6.noarch.rpm -y')
    if not cuisine.file_exists('/etc/yum.repos.d/nginx.repo'):
        nginx = [
            '[nginx]',
            'name=nginx repo',
            'baseurl=http://nginx.org/packages/centos/7/$basearch/',
            'gpgcheck=0',
            'enabled=1'
        ]
        sudo('echo \'%s\' > /etc/yum.repos.d/nginx.repo' % '\n'.join(nginx))
def _condition_common_rules(self):
    time.sleep(1)
    file_path = "/mnt/etc/udev/rules.d/70-persistent-net.rules"
    if file_exists(file_path):
        if not file_is_link(file_path):
            do_sudo(['rm', '-rf', file_path])
            do_sudo(['ln', '-s', '/dev/null', file_path])
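
# do_sudo() above is a small wrapper that is not part of this collection. A
# minimal sketch, assuming it just joins the token list and hands it to
# Fabric's sudo() (the same pattern _download_iso() below uses inline):
from fabric.api import sudo

def do_sudo(cmd):
    # cmd is a list of command tokens, e.g. ['rm', '-rf', path].
    return sudo(' '.join(cmd))
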
def phpunit():
    if not cuisine.file_exists('/usr/bin/phpunit'):
        if package_installed('php-pear'):
            cuisine.run('pear config-set auto_discover 1')
            cuisine.run('pear install pear.phpunit.de/PHPUnit')
        else:
            print('PHPUnit install failed: PEAR is missing')
def _upload_squashfs(self):
    # Upload snapshot
    print("Uploading '{f}' to '{h}'...".format(
        f=self.snapshot_file, h=self.hostname))
    if not file_exists(self.squashfs):
        put(self.snapshot_file, self.squashfs)
        sudo("sync && echo 3 > /proc/sys/vm/drop_caches")
def link_nginx():
    # TODO: would be better as sudo
    env.user = ROOT_USER
    env.password = ROOT_PASS
    if not file_exists("/etc/nginx/sites-enabled/rtf.conf"):
        with cd("/etc/nginx/sites-enabled"):
            file_link("/opt/rtd/apps/readthedocs/current/readthedocs.org/conf/nginx.conf",
                      "rtf.conf")
def link_supervisor():
    # TODO: would be better as sudo
    env.user = ROOT_USER
    env.password = ROOT_PASS
    if not file_exists("/etc/supervisor/conf.d/rtf.conf"):
        with cd("/etc/supervisor/conf.d"):
            file_link("/opt/rtd/apps/readthedocs/current/readthedocs.org/conf/supervisor.conf",
                      "rtf.conf")
def stage7_install_midonet_agent():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    puts(green("installing MidoNet agent on %s" % env.host_string))

    zk = []
    zkc = []
    for zkhost in sorted(metadata.roles['container_zookeeper']):
        zk.append("{'ip' => '%s', 'port' => '2181'}" % metadata.containers[zkhost]['ip'])
        zkc.append("%s:2181" % metadata.containers[zkhost]['ip'])

    cs = []
    csc = []
    for cshost in sorted(metadata.roles['container_cassandra']):
        cs.append("'%s'" % metadata.containers[cshost]['ip'])
        csc.append("%s" % metadata.containers[cshost]['ip'])

    args = {}
    args['zk_servers'] = "[%s]" % ",".join(zk)
    args['cassandra_seeds'] = "[%s]" % ",".join(cs)

    Puppet.apply('midonet::midonet_agent', args, metadata)

    #
    # the midolman.conf that comes with the puppet module is hopelessly broken, we replace it here
    #
    run("""
ZK="%s"
CS="%s"
CS_COUNT="%s"

cat >/etc/midolman/midolman.conf<<EOF
[zookeeper]
zookeeper_hosts = ${ZK}
session_timeout = 30000
midolman_root_key = /midonet/v1
session_gracetime = 30000

[cassandra]
servers = ${CS}
replication_factor = ${CS_COUNT}
cluster = midonet
EOF
""" % (",".join(zkc), ",".join(csc), len(csc)))

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def dotdeb():
    if not cuisine.file_exists('/etc/apt/sources.list.d/dotdeb.list'):
        sources = ('deb http://packages.dotdeb.org wheezy all\n'
                   'deb-src http://packages.dotdeb.org wheezy all')
        cuisine.file_write('/etc/apt/sources.list.d/dotdeb.list', sources,
                           644, 'root', 'root')
        with cd('/tmp'):
            cuisine.run('wget http://www.dotdeb.org/dotdeb.gpg')
            fabtools.deb.add_apt_key('dotdeb.gpg')
def install_stage5():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    Install(metadata).install()

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def testExists(self):
    try:
        fd, path = tempfile.mkstemp()
        f = os.fdopen(fd, 'w')
        f.write('Hello World!')
        f.close()
        assert cuisine.file_exists(path)
    finally:
        os.unlink(path)
def _setup_percona_repository():
    # https://www.percona.com/doc/percona-server/5.7/installation/apt_repo.html
    cuisine.package_ensure('wget')
    if not cuisine.file_exists('/etc/apt/sources.list.d/percona-ps-80-release.list'):
        run('wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb')
        sudo('dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb')
        run('rm percona-release_latest.$(lsb_release -sc)_all.deb')
        sudo('percona-release setup ps80')
def _download_iso(self):
    """ Download base image... """
    print("Downloading base image...")
    time.sleep(1)
    if not file_exists(self.base_iso):
        cmd = ['wget', self.base_iso_url, '-O', self.base_iso]
        sudo(' '.join(cmd))
def stage7_physical_openstack_compute_midonet_agent():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    stage7_install_midonet_agent()
    stage7_start_physical_midonet_agent()

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def stage7_container_midonet_gateway_midonet_agent():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    stage7_install_midonet_agent()
    stage7_start_container_midonet_agent()

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def uninstall_gunicorn(self):
    with mode_sudo():
        with settings(warn_only=True):
            dir_remove('/var/log/gunicorn', recursive=True)
            if (self.virtualenv_dir
                    and file_exists(self.virtualenv_dir + '/bin/activate')):
                dir_remove(self.virtualenv_dir, recursive=True)
            if self.util.get_package_manager() == 'apt':
                dir_remove('/etc/init/gunicorn.conf')
def logrotate():
    if cuisine.file_exists('/var/log/nginx/access.log'):
        sudo('cp /var/log/nginx/access.log /var/log/nginx/access_`date +%s`.log')
        sudo('cp /dev/null /var/log/nginx/access.log')
    if cuisine.file_exists('/var/log/nginx/error.log'):
        sudo('cp /var/log/nginx/error.log /var/log/nginx/error_`date +%s`.log')
        sudo('cp /dev/null /var/log/nginx/error.log')
    if cuisine.file_exists('/var/log/mysql/slow-queries.log'):
        sudo('cp /var/log/mysql/slow-queries.log /var/log/mysql/slow-queries_`date +%s`.log')
        sudo('cp /dev/null /var/log/mysql/slow-queries.log')
    if cuisine.file_exists('/var/log/nginx/access.log'):
        sudo('systemctl restart nginx')
    if cuisine.file_exists('/var/log/mysql/slow-queries.log'):
        sudo('systemctl restart mysql')
def _setup_kataribe():
    cuisine.package_ensure('wget')
    cuisine.package_ensure('unzip')
    if not cuisine.file_exists('/usr/local/bin/kataribe'):
        run('wget https://github.com/matsuu/kataribe/releases/download/v0.4.1/kataribe-v0.4.1_linux_amd64.zip')
        run('echo "34da63eb1696b964d30d9fac3b6c2778b9797eb4 kataribe-v0.4.1_linux_amd64.zip" | sha1sum -c -')
        run('unzip kataribe-v0.4.1_linux_amd64.zip kataribe')
        sudo('mv ./kataribe /usr/local/bin')
        run('rm kataribe-v0.4.1_linux_amd64.zip')
def _setup_nginx_repository():
    # http://nginx.org/en/linux_packages.html#Ubuntu
    cuisine.package_ensure('curl')
    cuisine.package_ensure('gnupg2')
    cuisine.package_ensure('ca-certificates')
    cuisine.package_ensure('lsb-release')
    if not cuisine.file_exists('/etc/apt/sources.list.d/nginx.list'):
        sudo('echo "deb http://nginx.org/packages/mainline/ubuntu `lsb_release -cs` nginx" | sudo tee /etc/apt/sources.list.d/nginx.list')
        sudo('curl -fsSL https://nginx.org/keys/nginx_signing.key | apt-key add -')
        sudo('apt-key fingerprint ABF5BD827BD9BF62')
def nginx_ensure(project_path, name, template, key_env):
    with mode_sudo(), cd(project_path):
        package_ensure('nginx')
        run("cp %s /etc/nginx/sites-available/%s" % (template, name))
        file_update('/etc/nginx/sites-available/%s' % name,
                    lambda x: text_template(x, key_env))
        if not file_exists("/etc/nginx/sites-enabled/%s" % name):
            run("ln -s -t /etc/nginx/sites-enabled /etc/nginx/sites-available/%s" % name)
        file_unlink('/etc/nginx/sites-enabled/default')
        run("service nginx restart")
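
# Example use of nginx_ensure(); the project path, site name, template file
# and template variables below are illustrative only (cuisine's
# text_template() does ${var}-style substitution):
def setup_myapp_nginx():
    nginx_ensure(
        project_path='/srv/myapp',
        name='myapp',
        template='conf/nginx.conf.tpl',
        key_env={'server_name': 'myapp.example.com', 'port': '8000'},
    )
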
def install_python_packages():
    print white('--- install python packages ---', bold=True)
    if not file_exists('/usr/bin/pip'):
        run('wget https://bootstrap.pypa.io/get-pip.py')
        with settings(mode_sudo()):
            run('/usr/local/bin/python2.7 get-pip.py')
        run('rm get-pip.py')
        with settings(mode_sudo()):
            run('ln -sf /usr/local/bin/pip /usr/bin/pip')
            run('pip install ipython')
            run('pip install virtualenv')
            run('pip install Pygments')
def install_pip():
    """Install pip latest version."""
    py_version = get_config()['version']
    fab.puts("Installing pip for python {0}".format(py_version))
    easy_install_bin = _python_bin_path(py_version, 'easy_install')
    if not cuisine.file_exists(easy_install_bin):
        fab.puts("easy_install for version {0} not found".format(py_version))
        return
    fab.sudo('{0} pip'.format(easy_install_bin))
def stage9_container_openstack_swift_check():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    run("""
source /etc/keystone/KEYSTONERC

swift list
""")
def deploy(commit_msg=None):
    localpath = os.path.dirname(os.path.realpath(__file__))

    if commit_msg:
        with lcd(localpath):
            with settings(warn_only=True):
                local('git commit -am "{commit_msg}"'.format(
                    commit_msg=commit_msg))

    with lcd(localpath):
        with settings(warn_only=True):
            local('git push')

    with cd('~'):
        if not dir_exists('blogging'):
            run('mkdir blogging')
            with cd('blogging'):
                run('git clone git://github.com/imathis/octopress.git')
                run('git clone git://github.com/tly1980/my_blog.git')

    with cd('~/blogging/octopress'):
        with prefix('source ~/.bash_profile'):
            # install the desired ruby version
            run('bundle install')

    with cd('~/blogging/my_blog'):
        run('git pull')

    with cd('~/blogging/octopress'):
        with settings(warn_only=True):
            run('rm Rakefile _config.yml config.rb source')
        run('ln -s ../my_blog/Rakefile .')
        run('ln -s ../my_blog/_config.yml .')
        run('ln -s ../my_blog/config.rb .')
        run('ln -s ../my_blog/source .')
        run('rake generate')

    with cd('~'):
        with settings(warn_only=True):
            sudo('rm -rvf /srv/keyonly.com')
        sudo('cp -r blogging/octopress/public /srv/keyonly.com')
        sudo('chmod -R 0755 /srv/keyonly.com')

    file_write('/etc/nginx/sites-available/keyonly.com', site_cfg, sudo=True)
    if not file_exists('/etc/nginx/sites-enabled/keyonly.com'):
        sudo('ln -s /etc/nginx/sites-available/keyonly.com /etc/nginx/sites-enabled/keyonly.com')
    upstart_ensure('nginx')