def update_config(cfg_name, cfg_list):
    """ Update xml files """
    # Build the Hadoop-style XML config as a list of chunks, then join.
    chunks = []
    header = text_strip_margin(
        """
|<?xml version="1.0"?>
|<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
|<!-- Put site-specific property overrides in this file. -->
|
|<configuration>
|""")
    chunks.append(header)
    # One <property> element per config entry (name -> value).
    for key in cfg_list:
        chunks.append(text_strip_margin(
            """
|  <property>
|    <name>{0}</name>
|    <value>{1}</value>
|  </property>
|""".format(key, cfg_list[key])))
    chunks.append('</configuration>\n')
    # `target` / `body` replace the old builtin-shadowing names `file` / `text`.
    target = '/usr/lib/hadoop/conf/' + cfg_name + '.xml'
    body = '\n'.join(chunks) + '\n'
    file_write(target, body, sudo=True)
def tinc_stage3_private_key():
    # Purpose: prepare /etc/tinc for this host's VPN domain and install the
    # pre-generated RSA private key uploaded from the control machine.
    run("mkdir -pv /etc/tinc")
    # nets.boot tells tincd which network(s) to start at boot.
    cuisine.file_write("/etc/tinc/nets.boot", metadata.config["domain"])
    run("""
DOMAIN="%s"

mkdir -pv "/etc/tinc/${DOMAIN}/hosts"

# tinc-up / tinc-down hooks must exist and be executable, even if empty.
for CMD in up down; do
    touch "/etc/tinc/${DOMAIN}/tinc-${CMD}"
    chmod 0755 "/etc/tinc/${DOMAIN}/tinc-${CMD}"
done

PRIVKEY="/etc/tinc/${DOMAIN}/rsa_key.priv"
""" % metadata.config["domain"])
    # The private key was staged under $TMPDIR/<host>/ by an earlier stage;
    # upload it into the domain directory on the target.
    cuisine.file_upload(
        "/etc/tinc/%s/rsa_key.priv" % metadata.config["domain"],
        "%s/%s/rsa_key.priv" % (os.environ["TMPDIR"], env.host_string))
    # Private key must not be world-readable.
    run("""
chmod 0600 /etc/tinc/%s/rsa_key.priv
""" % metadata.config["domain"])
def _condition_ubuntu_mtab(self):
    """Write a static /mnt/etc/mtab for the provisioned Ubuntu image.

    Reads the target device, partition label type and root filesystem
    format from ``self.disk_config`` and writes the mtab via sudo.
    """
    # Brief pause before touching the freshly mounted /mnt tree.
    time.sleep(1)
    file_path = "/mnt/etc/mtab"
    label_type = self.disk_config['label_type']
    device = self.disk_config['device']
    system_format = self.disk_config['system']['format']
    # GPT layouts shift partition numbering by one relative to MBR.
    if label_type == 'mbr':
        num = 0
    elif label_type == 'gpt':
        num = 1
    else:
        # FIX: previously an unrecognized label_type left `num` undefined
        # and crashed below with a NameError; fail fast with a clear error.
        raise ValueError("unsupported label_type: %r" % (label_type,))
    text = text_strip_margin("""
|{device}{system_num} / {system_format} rw,errors=remount-ro 0 0
|proc /proc proc rw,noexec,nosuid,nodev 0 0
|sysfs /sys sysfs rw,noexec,nosuid,nodev 0 0
|none /sys/fs/fuse/connections fusectl rw 0 0
|none /sys/kernel/debug debugfs rw 0 0
|none /sys/kernel/security securityfs rw 0 0
|udev /dev devtmpfs rw,mode=0755 0 0
|devpts /dev/pts devpts rw,noexec,nosuid,gid=5,mode=0620 0 0
|tmpfs /run tmpfs rw,noexec,nosuid,size=10%,mode=0755 0 0
|none /run/lock tmpfs rw,noexec,nosuid,nodev,size=5242880 0 0
|none /run/shm tmpfs rw,nosuid,nodev 0 0
|rpc_pipefs /run/rpc_pipefs rpc_pipefs rw 0 0
|""".format(device=device, system_num=num+2, system_format=system_format))
    with mode_sudo():
        file_write(file_path, text)
def ensure_production_settings():
    """Render the production settings template and deploy it, then restart."""
    notify('Configuring production settings.')
    cuisine.mode_sudo()
    # SENSITIVE supplies the template context (credentials etc.).
    rendered = cuisine.text_template(templates.production_settings, SENSITIVE)
    target = PROJECT['ROOT'] + '/moment/conf/production.py'
    cuisine.file_write(target, rendered)
    restart()
def resolv_conf():
    ''' Add our nameserver to /etc/resolv.conf '''
    message = "Setting up nameserver %s on %s" % (env.nameserver, env.host_string)
    print(green(message))
    # Note: this overwrites resolv.conf with a single nameserver line.
    file_write("/etc/resolv.conf", "nameserver %s" % env.nameserver)
def disabled_hello_systemvm_cuisine(self):
    """Test we can run cuisine on the systemvm."""
    file_write('/tmp/run_cuisine', '\n\nsuccess!\n')
    found, context = has_line('/tmp/run_cuisine', 'success!')
    if not found:
        # FIX: previously dumped '/tmp/cuisine', a file never written here;
        # show the file this test actually created so diagnostics are useful.
        print_doc('/tmp/run_cuisine', context)
    assert found, '/tmp/run_cuisine should contain "success!"'
def stage7_midonet_tunnelzone_members():
    """Add gateway/compute/neutron containers and physical hosts as MidoNet tunnel-zone members.

    Idempotent: guarded by a per-function lockfile in /tmp.
    """
    metadata = Config(os.environ["CONFIGFILE"])
    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return
    cuisine.package_ensure("expect")
    for container_role in ['container_midonet_gateway', 'container_openstack_compute', 'container_openstack_neutron']:
        if container_role in metadata.roles:
            for container in metadata.containers:
                if container in metadata.roles[container_role]:
                    puts(green("adding container %s as member to tunnel zones" % container))
                    add_host_to_tunnel_zone(metadata.config["debug"], container, metadata.containers[container]["ip"])
    for physical_role in ['physical_midonet_gateway', 'physical_openstack_compute']:
        if physical_role in metadata.roles:
            for server in metadata.servers:
                if server in metadata.roles[physical_role]:
                    puts(green("adding server %s as member to tunnel zones" % server))
                    #
                    # tinc can only work with MTU 1500
                    # we could use the approach from http://lartc.org/howto/lartc.cookbook.mtu-mss.html
                    # but instead we will disable rp_filter and use the physical interface ip
                    # (not the vpn address "%s.%s" % (vpn_base, idx)).
                    #
                    # FIX: both server_ip assignments were commented out, so the
                    # call below raised NameError; per the note above we use the
                    # physical interface ip.
                    server_ip = metadata.servers[server]["ip"]
                    add_host_to_tunnel_zone(metadata.config["debug"], server, server_ip)
    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def append_line_to_file(self, tag, add_line, filepath):
    '''
    Append a line to a file on the remote filesystem if it's not there
    already.  Look for the tag to see if the line is there already, in
    case the existing line has different spacing or tabbing than the
    new line.

    :type tag: string
    :param tag: tag to look for in existing lines
    :type add_line: string
    :param add_line: line to append to file
    :type filepath: string
    :param filepath: fully-qualified path to remote file
    '''
    old_contents = cuisine.file_read(filepath)
    eol = cuisine.text_detect_eol(old_contents)
    lines = old_contents.rstrip(eol).split(eol)
    # FIX: removed a leftover debug `print line` that echoed every line of
    # the remote file; any() also short-circuits on the first match instead
    # of scanning the remainder.
    if not any(line.find(tag) != -1 for line in lines):
        lines.append(add_line)
    cuisine.file_write(filepath, eol.join(lines) + eol)
def ensure_production_settings():
    """Generate production.py from its template, push it, and restart."""
    notify("Configuring production settings.")
    cuisine.mode_sudo()
    path = "%s/moment/conf/production.py" % PROJECT["ROOT"]
    # SENSITIVE is the template context for the production settings.
    cuisine.file_write(path, cuisine.text_template(templates.production_settings, SENSITIVE))
    restart()
def _condition_centos_fstab(self):
    """Write /mnt/etc/fstab for the provisioned CentOS image.

    Partition numbers for root and swap are derived from the label type
    (GPT shifts numbering by one relative to MBR).
    """
    # Brief pause before touching the freshly mounted /mnt tree.
    time.sleep(1)
    file_path = "/mnt/etc/fstab"
    label_type = self.disk_config['label_type']
    device = self.disk_config['device']
    system_format = self.disk_config['system']['format']
    if label_type == 'mbr':
        num = 0
    elif label_type == 'gpt':
        num = 1
    else:
        # FIX: previously an unrecognized label_type left `num` undefined
        # and crashed below with a NameError; fail fast instead.
        raise ValueError("unsupported label_type: %r" % (label_type,))
    text = text_strip_margin("""
|#TEEFAA-BEGIN
|# /etc/fstab
|# Created by Teefaa
|#
|# Accessible filesystems, by reference, are maintained under '/dev/disk'
|# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
|#
|{device}{system_num} / {system_format} defaults 1 1
|{device}{swap_num} swap swap defaults 0 0
|tmpfs /dev/shm tmpfs defaults 0 0
|devpts /dev/pts devpts gid=5,mode=620 0 0
|sysfs /sys sysfs defaults 0 0
|proc /proc proc defaults 0 0
|#TEEFAA-END
|""".format(device=device,
            swap_num=num+1,
            system_num=num+2,
            system_format=system_format))
    with mode_sudo():
        file_write(file_path, text)
def _condition_ubuntu_fstab(self):
    """Write /mnt/etc/fstab for the provisioned Ubuntu image.

    Partition numbers for root and swap are derived from the label type
    (GPT shifts numbering by one relative to MBR).
    """
    # Brief pause before touching the freshly mounted /mnt tree.
    time.sleep(1)
    file_path = "/mnt/etc/fstab"
    label_type = self.disk_config['label_type']
    device = self.disk_config['device']
    system_format = self.disk_config['system']['format']
    if label_type == 'mbr':
        num = 0
    elif label_type == 'gpt':
        num = 1
    else:
        # FIX: previously an unrecognized label_type left `num` undefined
        # and crashed below with a NameError; fail fast instead.
        raise ValueError("unsupported label_type: %r" % (label_type,))
    text = text_strip_margin("""
|#TEEFAA-BEGIN
|# /etc/fstab: static file system information.
|#
|# Use 'blkid' to print the universally unique identifier for a
|# device; this may be used with UUID= as a more robust way to name devices
|# that works even if disks are added and removed. See fstab(5).
|#
|# <file system> <mount point> <type> <options> <dump> <pass>
|proc /proc proc nodev,noexec,nosuid 0 0
|{device}{swap_num} none swap sw 0 0
|{device}{system_num} / {system_format} errors=remount-ro 0 1
|#TEEFAA-END
|""".format(device=device,
            swap_num=num+1,
            system_num=num+2,
            system_format=system_format))
    with mode_sudo():
        file_write(file_path, text)
def php():
    # Purpose: install the PHP5 stack and common extensions, drop a custom
    # php.ini override, make sure PEAR is present and current, and purge
    # the suhosin hardening patch if installed.
    cuisine.package_ensure([
        'php5', 'php5-cli', 'php5-curl', 'php5-dev', 'php5-gd', 'php5-imap',
        'php5-memcached', 'php5-mcrypt', 'php5-mysqlnd', 'php5-sqlite',
        'php5-tidy', 'php5-xdebug', 'php5-xmlrpc', 'php5-xsl'
    ])
    if not cuisine.file_exists('/etc/php5/conf.d/custom.ini'):
        # NOTE(review): the backslash continuations keep this one string
        # literal; the leading spaces of each continued line end up inside
        # the written ini file (harmless to PHP's ini parser) — the exact
        # indentation here is reconstructed, confirm against the deployed file.
        custom = 'date.timezone = "Europe/Paris"\n \
            error_reporting = E_ALL\n \
            display_errors = On\n \
            display_startup_errors = On\n \
            short_open_tag = Off'
        cuisine.file_write('/etc/php5/conf.d/custom.ini', custom, 644, 'root', 'root')
    if not package_installed('php-pear'):
        cuisine.package_ensure('php-pear')
        cuisine.run('pear channel-update pear.php.net')
        cuisine.run('pear upgrade pear')
    if package_installed('php5-suhosin'):
        cuisine.run('apt-get purge php5-suhosin -y')
def stage7_container_midonet_gateway_setup():
    # Purpose: plumb the "fakeuplink" veth pair + bridge on a MidoNet gateway
    # container so the provider router can reach the FIP range without a
    # dedicated edge NIC. Idempotent via a per-function lockfile.
    metadata = Config(os.environ["CONFIGFILE"])
    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return
    # Host index is the digits of the fabric host string; the overlay side
    # counts down from .255 so the two ends never collide.
    server_idx = int(re.sub(r"\D", "", env.host_string))
    overlay_ip_idx = 255 - server_idx
    run("""
if [[ "%s" == "True" ]] ; then set -x; fi

#
# fakeuplink logic for midonet gateways without binding a dedicated virtual edge NIC
#
# this is recommended for silly toy installations only - do not do this in production!
#
# The idea with the veth-pairs was originally introduced and explained to me from Daniel Mellado.
#
# Thanks a lot, Daniel!
#

# this will go into the host-side of the veth pair
PHYSICAL_IP="%s"

# this will be bound to the provider router
OVERLAY_BINDING_IP="%s"

FIP_BASE="%s"

ip a | grep veth1 || \
    ip link add type veth # these two interfaces are basically acting as a virtual RJ45 cross-over patch cable

ifconfig veth0 up
ifconfig veth1 up

# this bridge brings us to the linux kernel routing
brctl addbr fakeuplink

# this is the physical ip we use for routing (SNATing inside linux)
ifconfig fakeuplink "${PHYSICAL_IP}/24" up

# this is the physical plug of the veth-pair
brctl addif fakeuplink veth0 # veth1 will be used by midonet

# change this to the ext range for more authentic testing
ip route add ${FIP_BASE}.0/24 via "${OVERLAY_BINDING_IP}"

# enable routing
echo 1 > /proc/sys/net/ipv4/ip_forward
""" % (metadata.config["debug"],
       "%s.%s" % (metadata.config["fake_transfer_net"], str(server_idx)),
       "%s.%s" % (metadata.config["fake_transfer_net"], str(overlay_ip_idx)),
       metadata.config["fip_base"]))
    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def cloudstack_init(fqdn=None):
    """Prepare a host for CloudStack: SELinux permissive, yum repo, hosts entry, NTP.

    :param fqdn: fully-qualified domain name of this host; aborts if missing.
    """
    puts(green('CloudStack Initialize'))
    if fqdn is None:
        abort('Please set FQDN\n\tex) $ fab .... rolename:"fqdn"')
    # Sorry, Ishikawa-san. (SELinux is relaxed to permissive here.)
    sudo('sed -i -e "s/SELINUX=enforcing/SELINUX=permissive/g" /etc/selinux/config')
    sudo('setenforce permissive')
    # Repository for CloudStack
    repository = '[cloudstack]\nname=cloudstack\n'
    repository += 'baseurl=http://cloudstack.apt-get.eu/rhel/4.2/\n'
    repository += 'enabled=1\n'
    repository += 'gpgcheck=0\n'
    cuisine.file_write('/etc/yum.repos.d/CloudStack.repo', repository)
    # Setting FQDN: prefix the localhost line with the fqdn if not present yet.
    if not fqdn in cuisine.file_read('/etc/hosts'):
        sudo('sed -i -e "s/localhost/' + fqdn + ' localhost/" /etc/hosts')
    # NTP
    install_package('ntp')
    cuisine.upstart_ensure('ntpd')
    sudo('chkconfig ntpd on')
    download_and_upload('ntp/%s-ntp.conf', '/etc/ntp.conf', abort_flg=False)
    puts(green('Success'))

#--------------------------------------#
#                 NFS                  #
#--------------------------------------#

def nfs(directory):
    """Install and configure an NFS server exporting *directory*.

    :param directory: export directory to create and share; aborts if missing.
    """
    puts(green('Setup NFS'))
    # FIX: previously tested `fqdn`, which is not defined in this function
    # (NameError at runtime); validate the actual argument instead.
    if directory is None:
        abort('Please set Directory\n\tex) $ fab .... nfsrolename:"/etc/hogehoge"')
    # Install
    install_package('nfs-utils')
    # Start
    for name in ['rpcbind', 'nfs']:
        cuisine.upstart_ensure(name)
        sudo('chkconfig %s on' % name)
    # Create Directory
    cuisine.dir_ensure(directory, recursive=True)
    # Setting /etc/exports
    download_and_upload('nfs/%s-exports', '/etc/exports')
    sudo('exportfs -a')
    # Setting /etc/sysconfig/nfs
    download_and_upload('nfs/%s-nfs', '/etc/sysconfig/nfs')
    puts(green('Success'))
def stage7_midonet_fakeuplinks():
    # Purpose: for every midonet gateway container, attach a "fakeuplink" port
    # to the MidoNet Provider Router, bind it to veth1 on that host, and add
    # a default route out through the veth pair. Idempotent via lockfile.
    metadata = Config(os.environ["CONFIGFILE"])
    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return
    # provider router has been created now. we can set up the static routing logic.
    # note that we might also change this role loop to include compute nodes
    # (for simulating a similar approach like the HP DVR off-ramping directly from the compute nodes)
    for role in ['container_midonet_gateway']:
        if role in metadata.roles:
            for container in metadata.containers:
                if container in metadata.roles[role]:
                    puts(green("setting up fakeuplink provider router leg for container %s" % container))
                    # container index digits drive the transfer-net addressing;
                    # the overlay side mirrors it counting down from .255
                    physical_ip_idx = int(re.sub(r"\D", "", container))
                    overlay_ip_idx = 255 - physical_ip_idx
                    #
                    # This logic is the complimentary logic to what happens on the midonet gateways
                    # when the veth pair, the fakeuplink bridge and the eth0 SNAT is set up.
                    # We might some day change this to proper BGP peer (which will be in another
                    # container or on a different host of course).
                    #
                    run("""
if [[ "%s" == "True" ]] ; then set -x; fi

CONTAINER_NAME="%s"
FAKEUPLINK_VETH1_IP="%s"
FAKEUPLINK_NETWORK="%s.0/24"
FAKEUPLINK_VETH0_IP="%s"

/usr/bin/expect<<EOF
set timeout 10
spawn midonet-cli

expect "midonet> " { send "cleart\r" }
expect "midonet> " { send "router list name 'MidoNet Provider Router'\r" }
expect "midonet> " { send "router router0 add port address ${FAKEUPLINK_VETH1_IP} net ${FAKEUPLINK_NETWORK}\r" }
expect "midonet> " { send "port list device router0 address ${FAKEUPLINK_VETH1_IP}\r" }
expect "midonet> " { send "host list name ${CONTAINER_NAME}\r" }
expect "midonet> " { send "host host0 add binding port router router0 port port0 interface veth1\r" }
expect "midonet> " { send "router router0 add route type normal weight 0 src 0.0.0.0/0 dst 0.0.0.0/0 gw ${FAKEUPLINK_VETH0_IP} port port0\r" }
expect "midonet> " { send "quit\r" }
EOF
""" % (metadata.config["debug"],
       container,
       "%s.%s" % (metadata.config["fake_transfer_net"], str(overlay_ip_idx)),
       metadata.config["fake_transfer_net"],
       "%s.%s" % (metadata.config["fake_transfer_net"], str(physical_ip_idx))))
    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def stage7_container_midonet_api():
    # Purpose: install midonet-api via puppet (keystone-backed auth), patch
    # the web.xml mock-auth class name, and block until the API answers.
    # Idempotent via a per-function lockfile.
    metadata = Config(os.environ["CONFIGFILE"])
    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return
    # zookeeper connection list rendered as a puppet hash literal
    zk = []
    for zkhost in sorted(metadata.roles['container_zookeeper']):
        zk.append("{'ip' => '%s', 'port' => '2181'}" % metadata.containers[zkhost]['ip'])
    args = {}
    args['zk_servers'] = "[%s]" % ",".join(zk)
    args['keystone_auth'] = "true"
    args['vtep'] = "true"
    #
    # slice and dice the password cache so we can access it in python
    # (lines look like "export NAME=value"; keys keep the "export " prefix)
    #
    passwords = {}
    with open(os.environ["PASSWORDCACHE"]) as passwordcache:
        for line in passwordcache:
            name, var = line.partition("=")[::2]
            passwords[name] = str(var).rstrip('\n')
    #
    # this is supposed to be the outer ip, not the container ip, remember HATEOAS
    #
    args['api_ip'] = "'%s'" % metadata.servers[metadata.roles["midonet_api"][0]]["ip"]
    args['api_port'] = "'8081'"
    args['keystone_host'] = "'%s'" % metadata.containers[metadata.roles["container_openstack_keystone"][0]]["ip"]
    args['keystone_port'] = "'35357'"
    args['keystone_admin_token'] = "'%s'" % passwords["export ADMIN_TOKEN"]
    args['keystone_tenant_name'] = "'admin'"
    Puppet.apply('midonet::midonet_api', args, metadata)
    #
    # in case mock auth was installed:
    #
    run("""
sed -i 's,org.midonet.api.auth.MockAuthService,org.midonet.cluster.auth.MockAuthService,g;' /usr/share/midonet-api/WEB-INF/web.xml
""")
    #
    # wait for the api to come up
    #
    puts(green("please wait for midonet-api to come up, this can take a long time!"))
    run("""
wget -SO- -- http://%s:8081/midonet-api/; echo
""" % metadata.servers[metadata.roles["midonet_api"][0]]["ip"])
    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def stage7_midonet_fakeuplinks():
    # Purpose: for every midonet gateway container, attach a "fakeuplink" port
    # to the MidoNet Provider Router, bind it to veth1 on that host, and add a
    # default route out through the veth pair. Idempotent via lockfile.
    metadata = Config(os.environ["CONFIGFILE"])
    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return
    # provider router has been created now. we can set up the static routing logic.
    # note that we might also change this role loop to include compute nodes
    # (for simulating a similar approach like the HP DVR off-ramping directly from the compute nodes)
    for role in ['container_midonet_gateway']:
        if role in metadata.roles:
            for container in metadata.containers:
                if container in metadata.roles[role]:
                    puts(green("setting up fakeuplink provider router leg for container %s" % container))
                    # container index digits drive the transfer-net addressing;
                    # the overlay side mirrors it counting down from .255
                    physical_ip_idx = int(re.sub(r"\D", "", container))
                    overlay_ip_idx = 255 - physical_ip_idx
                    #
                    # This logic is the complimentary logic to what happens on the midonet gateways
                    # when the veth pair, the fakeuplink bridge and the eth0 SNAT is set up.
                    # We might some day change this to proper BGP peer (which will be in another
                    # container or on a different host of course).
                    #
                    run("""
if [[ "%s" == "True" ]] ; then set -x; fi

CONTAINER_NAME="%s"
FAKEUPLINK_VETH1_IP="%s"
FAKEUPLINK_NETWORK="%s.0/24"
FAKEUPLINK_VETH0_IP="%s"

/usr/bin/expect<<EOF
set timeout 10
spawn midonet-cli

expect "midonet> " { send "cleart\r" }
expect "midonet> " { send "router list name 'MidoNet Provider Router'\r" }
expect "midonet> " { send "router router0 add port address ${FAKEUPLINK_VETH1_IP} net ${FAKEUPLINK_NETWORK}\r" }
expect "midonet> " { send "port list device router0 address ${FAKEUPLINK_VETH1_IP}\r" }
expect "midonet> " { send "host list name ${CONTAINER_NAME}\r" }
expect "midonet> " { send "host host0 add binding port router router0 port port0 interface veth1\r" }
expect "midonet> " { send "router router0 add route type normal weight 0 src 0.0.0.0/0 dst 0.0.0.0/0 gw ${FAKEUPLINK_VETH0_IP} port port0\r" }
expect "midonet> " { send "quit\r" }
EOF
""" % (
                        metadata.config["debug"],
                        container,
                        "%s.%s" % (metadata.config["fake_transfer_net"], str(overlay_ip_idx)),
                        metadata.config["fake_transfer_net"],
                        "%s.%s" % (metadata.config["fake_transfer_net"], str(physical_ip_idx))
                    ))
    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def testSHA1( self ):
    # Write a known payload, then check cuisine's remote digest against a
    # locally computed one. (Despite the legacy name, both sides use sha256.)
    path = "/tmp/cuisine.test"
    payload = "Hello World!"
    cuisine.file_write(path, payload, check=False)
    remote_sig = cuisine.file_sha256(path)
    with file(path) as f:
        local_sig = hashlib.sha256(f.read()).hexdigest()
    assert remote_sig == local_sig
def testWrite( self ):
    # file_write must create the file with exactly the given content;
    # clean up the fixture afterwards.
    path = "/tmp/cuisine.test"
    payload = "Hello World!"
    cuisine.file_write(path, payload, check=False)
    assert os.path.exists(path)
    with file(path) as f:
        assert f.read() == payload
    os.unlink(path)
def testSHA1(self):
    # Compare cuisine's digest of a freshly written file with a local
    # hashlib digest. (Name says SHA1 but both sides compute sha256.)
    path = "/tmp/cuisine.test"
    payload = "Hello World!"
    cuisine.file_write(path, payload, check=False)
    remote_sig = cuisine.file_sha256(path)
    with file(path) as f:
        local_sig = hashlib.sha256(f.read()).hexdigest()
    assert remote_sig == local_sig
def testWrite(self):
    # file_write must create the file with exactly the given content;
    # remove the fixture afterwards.
    path = "/tmp/cuisine.test"
    payload = "Hello World!"
    cuisine.file_write(path, payload, check=False)
    assert os.path.exists(path)
    with file(path) as f:
        assert f.read() == payload
    os.unlink(path)
def stage7_install_midonet_agent():
    # Purpose: install midolman via puppet, then overwrite its midolman.conf
    # with working zookeeper/cassandra settings. Idempotent via lockfile.
    metadata = Config(os.environ["CONFIGFILE"])
    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return
    puts(green("installing MidoNet agent on %s" % env.host_string))
    # Two renderings of the same host lists: puppet hash literals (zk/cs)
    # for the puppet class, and plain comma-joinable strings (zkc/csc) for
    # the hand-written midolman.conf below.
    zk = []
    zkc = []
    for zkhost in sorted(metadata.roles['container_zookeeper']):
        zk.append("{'ip' => '%s', 'port' => '2181'}" % metadata.containers[zkhost]['ip'])
        zkc.append("%s:2181" % metadata.containers[zkhost]['ip'])
    cs = []
    csc = []
    for cshost in sorted(metadata.roles['container_cassandra']):
        cs.append("'%s'" % metadata.containers[cshost]['ip'])
        csc.append("%s" % metadata.containers[cshost]['ip'])
    args = {}
    args['zk_servers'] = "[%s]" % ",".join(zk)
    args['cassandra_seeds'] = "[%s]" % ",".join(cs)
    Puppet.apply('midonet::midonet_agent', args, metadata)
    #
    # the midolman.conf that comes with the puppet module is hopelessly broken, we replace it here
    #
    run("""
ZK="%s"
CS="%s"
CS_COUNT="%s"

cat >/etc/midolman/midolman.conf<<EOF
[zookeeper]
zookeeper_hosts = ${ZK}
session_timeout = 30000
midolman_root_key = /midonet/v1
session_gracetime = 30000

[cassandra]
servers = ${CS}
replication_factor = ${CS_COUNT}
cluster = midonet
EOF
""" % (",".join(zkc), ",".join(csc), len(csc)))
    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def dotdeb():
    """Register the dotdeb APT source list and trust its signing key."""
    list_file = '/etc/apt/sources.list.d/dotdeb.list'
    if not cuisine.file_exists(list_file):
        # NOTE: the continuation keeps this one literal; the continued line's
        # leading spaces are part of the written file.
        sources = 'deb http://packages.dotdeb.org wheezy all\n \
            deb-src http://packages.dotdeb.org wheezy all'
        cuisine.file_write(list_file, sources, 644, 'root', 'root')
        with cd('/tmp'):
            cuisine.run('wget http://www.dotdeb.org/dotdeb.gpg')
            fabtools.deb.add_apt_key('dotdeb.gpg')
def install_stage5():
    """Run the stage-5 installer once per host (lockfile-guarded)."""
    metadata = Config(os.environ["CONFIGFILE"])
    lockfile = "/tmp/.%s.lck" % sys._getframe().f_code.co_name
    if cuisine.file_exists(lockfile):
        return
    Install(metadata).install()
    cuisine.file_write(lockfile, "xoxo")
def setup_databases():
    """Create each database in the module-level `dbs` list and grant the
    like-named local MySQL user full access to it."""
    statements = []
    for db in dbs:
        statements.append("CREATE DATABASE IF NOT EXISTS %s;" % db)
        statements.append("GRANT ALL ON %s.* TO '%s'@'localhost';" % (db, db))
    tmp = cuisine.tempfile.mktemp()
    # FIX: file_write was handed the raw Python list, not SQL text; join the
    # statements into one newline-separated script before writing.
    cuisine.file_write(tmp, "\n".join(statements) + "\n")
    run("mysql -u root -pape < %s" % tmp)
    run("rm %s" % tmp)
def update_config(self, config): config_json = json.dumps(config, indent=2) #print_doc('config.json', config_json) file_write('/var/cache/cloud/update_config_test.json', config_json) with hide("everything"): result = run("python /opt/cloud/bin/update_config.py update_config_test.json", timeout=600, warn_only=True) assert result.succeeded, 'update_config.py ran without errors' assert result.find("Convergence is achieved") >= 0, 'update_config.py should report convergence'
def _condition_centos_hostname(self):
    """Write /mnt/etc/sysconfig/network with this node's hostname."""
    body = text_strip_margin("""
|NETWORKING=yes
|HOSTNAME={h}
|""".format(h=self.hostname))
    with mode_sudo():
        file_write("/mnt/etc/sysconfig/network", body)
def _update_grub_conf(self):
    # Purpose: regenerate /mnt/boot/grub/grub.conf pointing at the newest
    # installed kernel and initramfs on the target device.
    device = self.device
    rootp = self.rootp
    distro = self.distro
    # `ls -t1` sorts newest-first; the split/rstrip chain peels
    # "/mnt/boot/<name>" down to just <name> (dropping a trailing \r).
    cmd = ['ls', '-t1', '/mnt/boot/vmlinuz*']
    output = sudo(' '.join(cmd))
    kernel = output.split('\n')[0].split('/')[3].rstrip('\r')
    cmd = ['ls', '-t1', '/mnt/boot/initramfs*']
    output = sudo(' '.join(cmd))
    ramdisk = output.split('\n')[0].split('/')[3].rstrip('\r')
    # Map the block device to grub's drive naming.
    if device == '/dev/sda':
        hd = 'hd0'
    elif device == '/dev/sdb':
        hd = 'hd1'
    elif device == '/dev/sdc':
        hd = 'hd2'
    else:
        raise TypeError, "the device is not support."
    # grub partition numbers are zero-based; rootp is one-based.
    num = rootp - 1
    kernel_options = ['rd_NO_LUKS', 'LANG=en_US.UTF-8', 'rd_NO_MD',
                      'SYSFONT=latarcyrheb-sun16', 'crashkernel=auto',
                      'KEYBOARDTYPE=pc', 'KEYTABLE=us', 'rd_NO_DM',
                      'notsc', 'clocksource=acpi_pm']
    file_path = "/mnt/boot/grub/grub.conf"
    text = text_strip_margin("""
|# grub.conf generated by Teefaa
|#boot={device}
|default=0
|timeout=5
|splashimage=({hd},{num})/boot/grub/splash.xpm.gz
|hiddenmenu
|title {distro} ({kernel})
|    root ({hd},{num})
|    kernel /boot/{kernel} ro root={device}{rootp} {options}
|    initrd /boot/{ramdisk}
|""".format(
        device=device,
        hd=hd,
        num=num,
        kernel=kernel,
        ramdisk=ramdisk,
        distro=distro,
        rootp=rootp,
        options=' '.join(kernel_options)))
    with mode_sudo():
        file_write(file_path, text, mode=600)
def stage7_physical_openstack_compute_midonet_agent():
    """Install and start the MidoNet agent on a physical compute node (once)."""
    metadata = Config(os.environ["CONFIGFILE"])
    lockfile = "/tmp/.%s.lck" % sys._getframe().f_code.co_name
    if cuisine.file_exists(lockfile):
        return
    stage7_install_midonet_agent()
    stage7_start_physical_midonet_agent()
    cuisine.file_write(lockfile, "xoxo")
def stage7_container_midonet_gateway_midonet_agent():
    """Install and start the MidoNet agent in a gateway container (once)."""
    metadata = Config(os.environ["CONFIGFILE"])
    lockfile = "/tmp/.%s.lck" % sys._getframe().f_code.co_name
    if cuisine.file_exists(lockfile):
        return
    stage7_install_midonet_agent()
    stage7_start_container_midonet_agent()
    cuisine.file_write(lockfile, "xoxo")
def config():
    """Ensures the app configuration is in place."""
    utilities.notify(u'Ensuring the app configuration settings.')
    cuisine.mode_sudo()
    # The fabric env itself serves as the template context.
    rendered = cuisine.text_template(env.app_config_template, env)
    cuisine.file_write(env.app_config_file, rendered)
    execute(restart)
def apache_site_ensure(name, config, root_dir=None):
    '''Configures and loads an apache site config'''
    # Default document root lives under /var/www/<name>.
    root_dir = root_dir or join('/var/www', name)
    available = '/etc/apache2/sites-available/{}'.format(name)
    enabled = '/etc/apache2/sites-enabled/{}'.format(name)
    with cuisine.mode_sudo():
        cuisine.dir_ensure(root_dir, recursive=True)
        cuisine.file_write(available, config, sudo=True)
    # Only enable + reload when the site is not already enabled.
    if not cuisine.file_exists(enabled):
        cuisine.sudo('a2ensite {}'.format(name))
        cuisine.sudo('service apache2 reload')
def update_etc_hosts(cfg_hosts):
    """Update /etc/hosts """
    # Always start with the loopback entry, then one line per configured host.
    entries = ["127.0.0.1 localhost"]
    for host in cfg_hosts:
        entries.append("{0} {1}".format(cfg_hosts[host]['ipaddr'], host))
    file_write('/etc/hosts', '\n'.join(entries) + '\n', sudo=True)
def sudo_file_write(filename, contents):
    """
    (Over)write a file as root.

    Stages the content in a mktemp file as the unprivileged user, then
    copies it into place with sudo and fixes the permissions — a substitute
    for fabric"s file_write for global configuration files.
    """
    with hide("output", "running"):
        staging = run("mktemp")
        file_write(staging, contents)
        sudo("cp -r {s} {d}".format(s=staging, d=filename))
        sudo("chmod 644 {}".format(filename))
        sudo("rm {}".format(staging))
def _condition_ubuntu_network(self):
    """Write /mnt/etc/network/interfaces from ``self.interfaces['add']``.

    Supports dhcp and static stanzas; static entries may carry optional
    gateway / dnsserver keys. Raises TypeError for any other bootp mode.
    """
    text = text_strip_margin("""
|# This file describes the network interfaces available on your system
|# and how to activate them. For more information, see interfaces(5).
|
|# The loopback network interface
|auto lo
|iface lo inet loopback
|""")
    file_path = "/mnt/etc/network/interfaces"
    with mode_sudo():
        file_write(file_path, text)
    for iface in self.interfaces['add']:
        cfg = self.interfaces['add'][iface]
        bootp = cfg['bootp']
        if bootp == 'dhcp':
            text = text_strip_margin("""
|# {iface}
|auto {iface}
|iface {iface} inet dhcp
|""".format(iface=iface))
            with mode_sudo():
                file_append(file_path, text)
        elif bootp == 'static':
            address = cfg['address']
            netmask = cfg['netmask']
            text = text_strip_margin("""
|# {iface}
|auto {iface}
|iface {iface} inet static
|    address {addr}
|    netmask {mask}
|""".format(iface=iface, addr=address, mask=netmask))
            with mode_sudo():
                file_append(file_path, text)
            # FIX: the optional keys were read under bare `except:` clauses
            # that swallowed *every* error; .get() expresses "key may be
            # absent" without hiding real bugs.
            gateway = cfg.get('gateway')
            if gateway:
                text = "    gateway {g}\n".format(g=gateway)
                with mode_sudo():
                    file_append(file_path, text)
            dnsserver = cfg.get('dnsserver')
            if dnsserver:
                text = "    dns-nameservers {d}\n".format(d=dnsserver)
                with mode_sudo():
                    file_append(file_path, text)
        else:
            raise TypeError("network_config: {0} is not supported.\n".format(iface))
def config_puppet():
    ''' Ensure the server directive is in puppet.conf '''
    print(green("Writing puppet config file"))
    config_file = '/etc/puppet/puppet.conf'
    current = file_read(config_file)
    # text_ensure_line adds the lines only if they are not present yet.
    desired = text_ensure_line(current, "[agent]", "server = %s" % env.puppet_server)
    file_write(config_file, desired)
def deploy(commit_msg=None):
    # Purpose: optionally commit+push local blog changes, build the Octopress
    # site on the remote host, publish it to /srv/keyonly.com and make sure
    # nginx serves it.
    localpath = os.path.dirname(os.path.realpath(__file__))
    # Commit local changes first if a message was given; warn_only because
    # "nothing to commit" is an acceptable outcome.
    if commit_msg:
        with lcd(localpath):
            with settings(warn_only=True):
                local('git commit -am "{commit_msg}"'.format(
                    commit_msg=commit_msg))
    with lcd(localpath):
        with settings(warn_only=True):
            local('git push')
    # First-time remote setup: clone octopress and the blog sources
    # side by side and install the ruby dependencies.
    with cd('~'):
        if not dir_exists('blogging'):
            run('mkdir blogging')
            with cd('blogging'):
                run('git clone git://github.com/imathis/octopress.git')
                run('git clone git://github.com/tly1980/my_blog.git')
            with cd('~/blogging/octopress'):
                with prefix('source ~/.bash_profile'):
                    # install the desire ruby version
                    run('bundle install')
    with cd('~/blogging/my_blog'):
        run('git pull')
    # Re-link the blog's config/sources into octopress and regenerate the
    # site; warn_only since the links/rm may already be in the desired state.
    with cd('~/blogging/octopress'):
        with settings(warn_only=True):
            run('rm Rakefile _config.yml config.rb source')
            run('ln -s ../my_blog/Rakefile .')
            run('ln -s ../my_blog/_config.yml .')
            run('ln -s ../my_blog/config.rb .')
            run('ln -s ../my_blog/source .')
            run('rake generate')
    # Publish the generated site and (re)install the nginx vhost.
    with cd('~'):
        with settings(warn_only=True):
            sudo('rm -rvf /srv/keyonly.com')
            sudo('cp -r blogging/octopress/public /srv/keyonly.com')
            sudo('chmod -R 0755 /srv/keyonly.com')
    file_write('/etc/nginx/sites-available/keyonly.com', site_cfg, sudo=True)
    if not file_exists('/etc/nginx/sites-enabled/keyonly.com'):
        sudo(
            'ln -s /etc/nginx/sites-available/keyonly.com /etc/nginx/sites-enabled/keyonly.com'
        )
    upstart_ensure('nginx')
def ensure_rq():
    """Render the RQ supervisor config for this project and install it."""
    notify('Configuring RQ.')
    context = {
        'ACTION_DATE': MACHINE['ACTION_DATE'],
        'NAME': PROJECT['NAME'],
        'KEY': KEY,
        'PROJECT_ROOT': PROJECT['ROOT'],
        'PROJECT_ENV': PROJECT['ENV'],
    }
    cuisine.mode_sudo()
    rendered = cuisine.text_template(templates.rq_supervisor, context)
    target = '/etc/supervisor/conf.d/' + KEY + '-rq.conf'
    cuisine.file_write(target, rendered)
    restart()
def haproxy():
    # Purpose: install haproxy front-ends that forward host ports into the
    # service containers this host fronts; restart haproxy only if at least
    # one front-end was configured. Idempotent via lockfile.
    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return
    haproxy_default_config()
    restart = 0
    if env.host_string in metadata.roles['openstack_keystone']:
        restart = 1
        # keystone public + admin API ports
        for port in [5000, 35357]:
            haproxy_into_container(port, port, 'container_openstack_keystone')
    if env.host_string in metadata.roles['openstack_controller']:
        restart = 1
        # novnc console + nova api
        for port in [6080, 8774]:
            haproxy_into_container(port, port, 'container_openstack_controller')
    if env.host_string in metadata.roles['openstack_glance']:
        restart = 1
        haproxy_into_container(9292, 9292, 'container_openstack_glance')
    if env.host_string in metadata.roles['openstack_neutron']:
        restart = 1
        haproxy_into_container(9696, 9696, 'container_openstack_neutron')
    if env.host_string in metadata.roles['midonet_api']:
        restart = 1
        for port in [8081, 8459, 8460, 8088]:
            haproxy_into_container(port, port, 'container_midonet_api')
    if env.host_string in metadata.roles['openstack_horizon']:
        restart = 1
        haproxy_into_container(80, 80, 'container_openstack_horizon')
    if env.host_string in metadata.roles['midonet_manager']:
        restart = 1
        # manager listens on container port 80, exposed as host port 81
        haproxy_into_container(81, 80, 'container_midonet_manager')
    if restart == 1:
        run("""
service haproxy restart

ps axufwwwwwwww | grep -v grep | grep haproxy
""")
    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def tinc_stage3_public_keys():
    """Write a tinc hosts/ file (address, compression, public key) for every peer server."""
    for server in metadata.servers:
        # Skip ourselves; only peer host files are distributed.
        # FIX: `<>` is the removed-in-Python-3 spelling of `!=`.
        if server != env.host_string:
            pubkey_path = '%s/%s/rsa_key.priv.pub' % (os.environ["TMPDIR"], server)
            # FIX: read via a context manager instead of leaking the handle
            # returned by a bare open().
            with open(pubkey_path, 'r') as pubkey_file:
                pubkey = pubkey_file.read()
            cuisine.file_write(
                "/etc/tinc/%s/hosts/%s" % (metadata.config["domain"], server),
                """
Address = %s
Compression = 9

%s
""" % (metadata.servers[server]["ip"], pubkey))
def stage7_physical_midonet_gateway_setup():
    """Bring up every DOWN network link on a physical MidoNet gateway (once)."""
    metadata = Config(os.environ["CONFIGFILE"])
    lockfile = "/tmp/.%s.lck" % sys._getframe().f_code.co_name
    if cuisine.file_exists(lockfile):
        return
    run("""
ip link show | grep 'state DOWN' | awk '{print $2;}' | sed 's,:,,g;' | xargs -n1 --no-run-if-empty ip link set up dev

ip a
""")
    cuisine.file_write(lockfile, "xoxo")
def check_tinc_stage3():
    """Ping this host's tinc VPN address to confirm the tunnel is up (once)."""
    lockfile = "/tmp/.%s.lck" % sys._getframe().f_code.co_name
    if cuisine.file_exists(lockfile):
        return
    puts(green("checking if the local tinc is up"))
    run("""
VPN_BASE="%s"
LOCAL_IP="${VPN_BASE}.%s"

ping -c3 "${LOCAL_IP}"
""" % (metadata.config["vpn_base"], metadata.config["idx"][env.host_string]))
    cuisine.file_write(lockfile, "xoxo")
def stage7_container_midonet_cli():
    # Purpose: install the MidoNet/OpenStack CLI clients and generate
    # /root/.midonetrc with admin credentials. Idempotent via lockfile.
    metadata = Config(os.environ["CONFIGFILE"])
    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return
    cuisine.package_ensure([
        "python-midonetclient",
        "python-keystoneclient",
        "python-glanceclient",
        "python-novaclient",
        "python-neutronclient"])
    # The password cache file is pasted verbatim into the script below (its
    # "export NAME=value" lines define ADMIN_PASS etc. in the shell).
    run("""
if [[ "%s" == "True" ]] ; then set -x; fi

#
# initialize the password cache
#
%s

API_IP="%s"
API_URI="%s"
OPENSTACK_RELEASE="%s"

source /etc/keystone/KEYSTONERC_ADMIN 2>/dev/null || source /etc/keystone/admin-openrc.sh

# kilo/liberty use the openstack CLI; older releases still ship keystone CLI
if [[ "kilo" == "${OPENSTACK_RELEASE}" || "liberty" == "${OPENSTACK_RELEASE}" ]]; then
    ADMIN_TENANT_ID="$(openstack project list --format csv | sed 's,",,g;' | grep -v ^ID | grep ',admin' | awk -F',' '{print $1;}' | xargs -n1 echo)"
else
    ADMIN_TENANT_ID="$(keystone tenant-list | grep admin | awk -F'|' '{print $2;}' | xargs -n1 echo)"
fi

# NOTE(review): api_url is "http://${API_IP}:${API_URI}" -- API_URI is the
# midonet internalurl fragment appended after the colon; confirm the
# resulting URL shape against metadata.services["midonet"]["internalurl"].
cat >/root/.midonetrc<<EOF
[cli]
api_url = http://${API_IP}:${API_URI}
username = admin
password = ${ADMIN_PASS}
tenant = ${ADMIN_TENANT_ID}
project_id = admin
EOF
""" % (metadata.config["debug"],
       open(os.environ["PASSWORDCACHE"]).read(),
       metadata.containers[metadata.roles["container_midonet_api"][0]]["ip"],
       metadata.services["midonet"]["internalurl"],
       metadata.config["openstack_release"]))
    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def stage7_container_zookeeper():
    # Purpose: install zookeeper via puppet with a stable per-node server id,
    # restart it, and health-check every ensemble member. Idempotent via lockfile.
    metadata = Config(os.environ["CONFIGFILE"])
    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return
    puts(green("installing zookeeper on %s" % env.host_string))
    zk = []
    zkid = 1
    myid = 1
    # Ids are assigned in sorted hostname order, so every node derives the
    # same id mapping independently.
    for zkhost in sorted(metadata.roles["container_zookeeper"]):
        zk.append("{'id' => '%s', 'host' => '%s'}" % (zkid, metadata.containers[zkhost]['ip']))
        if env.host_string == zkhost:
            # then this is our id
            myid = zkid
        zkid = zkid + 1
    args = {}
    args['servers'] = "[%s]" % ",".join(zk)
    args['server_id'] = "%s" % myid
    Puppet.apply('midonet::zookeeper', args, metadata)
    run("service zookeeper stop; service zookeeper start")
    # Wait (up to 10 min) for the JVM quorum process to appear.
    Daemon.poll('org.apache.zookeeper.server.quorum', 600)
    # Four-letter-word health check: "ruok" must answer "imok" on every member.
    for zkhost in sorted(metadata.roles['container_zookeeper']):
        run("""
IP="%s"

echo ruok | nc "${IP}" 2181 | grep imok
""" % metadata.containers[zkhost]['ip'])
    #
    # TODO status check for 'not serving requests'
    #
    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def stage7_midonet_tunnelzone_members():
    """Add every gateway/compute/neutron host to the MidoNet tunnel zones.

    Containers join with their container IP; physical servers join with
    their physical interface IP.  Idempotent via a lock file named after
    this function.
    """
    metadata = Config(os.environ["CONFIGFILE"])

    # Run-once guard.
    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    cuisine.package_ensure("expect")

    for container_role in ['container_midonet_gateway',
                           'container_openstack_compute',
                           'container_openstack_neutron']:
        if container_role in metadata.roles:
            for container in metadata.containers:
                if container in metadata.roles[container_role]:
                    puts(green("adding container %s as member to tunnel zones" % container))
                    add_host_to_tunnel_zone(
                        metadata.config["debug"],
                        container,
                        metadata.containers[container]["ip"])

    for physical_role in ['physical_midonet_gateway',
                          'physical_openstack_compute']:
        if physical_role in metadata.roles:
            for server in metadata.servers:
                if server in metadata.roles[physical_role]:
                    puts(green("adding server %s as member to tunnel zones" % server))
                    #
                    # tinc can only work with MTU 1500
                    # we could use the approach from http://lartc.org/howto/lartc.cookbook.mtu-mss.html
                    # but instead we will disable rp_filter and use the physical interface ip
                    #
                    # server_ip = "%s.%s" % (metadata.config["vpn_base"], metadata.config["idx"][server])
                    #
                    # BUGFIX: server_ip was only ever assigned in the
                    # commented-out lines above, so this branch raised
                    # NameError.  Use the physical interface ip, as the
                    # comment above says is intended.
                    server_ip = metadata.servers[server]["ip"]
                    add_host_to_tunnel_zone(metadata.config["debug"], server, server_ip)

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def install_upstart():
    """Render the local nginx upstart template and write /etc/init/nginx.conf."""
    cfg_version = get_config()['version']
    base_dir = os.path.join(_INSTALL_DIR, 'nginx', cfg_version)

    # Template context: absolute paths into the versioned install tree.
    template_path = os.path.join(ETC_DIR, 'init', 'nginx.conf')
    rendered = cuisine.text_template(
        cuisine.file_local_read(template_path),
        {
            'nginx_bin': os.path.join(base_dir, 'sbin', 'nginx'),
            'nginx_pid': os.path.join(base_dir, 'logs', 'nginx.pid'),
        })

    with cuisine.mode_sudo():
        cuisine.file_write('/etc/init/nginx.conf', rendered)
def ensure_nginx():
    """Render the per-project nginx vhost, install it with sudo, restart nginx."""
    notify('Configuring nginx.')
    context = {
        'ACTION_DATE': MACHINE['ACTION_DATE'],
        'NAME': PROJECT['NAME'],
        'KEY': KEY,
        'APP_LOCATION': PROJECT['APP_LOCATION'],
        'APP_PORT': PROJECT['APP_PORT'],
        'LOCATION': MACHINE['LOCATION'],
        'PORT': MACHINE['PORT'],
        'PROJECT_ROOT': PROJECT['ROOT'],
        'ACCESS_LOG': PROJECT['LOGS']['NGINX_ACCESS'],
        'ERROR_LOG': PROJECT['LOGS']['NGINX_ERROR'],
        'SERVER_NAMES': ' '.join(PROJECT['DOMAINS'])
    }
    content = cuisine.text_template(templates.nginx, context)
    # BUGFIX: mode_sudo() was previously called bare, which at best does
    # nothing and at worst flips sudo mode on for the remainder of the run.
    # Every other caller in this file uses it as a context manager, so scope
    # it to the privileged write only.
    with cuisine.mode_sudo():
        cuisine.file_write('/etc/nginx/sites-enabled/' + KEY, content)
    sudo('/etc/init.d/nginx restart')
def setup_users():
    '''Add web runner group and users'''
    puts(green('Creating users and groups'))

    # Temporarily switch fabric credentials to the sudo-capable account;
    # restored unconditionally at the end of this function.
    orig_user, orig_passw, orig_cert = env.user, env.password, env.key_filename
    env.user, env.password, env.key_filename = \
        SSH_SUDO_USER , SSH_SUDO_PASSWORD, SSH_SUDO_CERT

    cuisine.group_ensure(WEB_RUNNER_GROUP)
    cuisine.user_ensure(
        WEB_RUNNER_USER,
        gid=WEB_RUNNER_GROUP,
        shell='/bin/bash',
        passwd=WEB_RUNNER_PASSWORD,
        encrypted_passwd=False,
    )

    # Create the ssh certificate for web_runner user
    rem_ssh_deploy_cert_file = '~%s/.ssh/authorized_keys' % WEB_RUNNER_USER
    rem_ssh_priv_cert_file = '~%s/.ssh/id_rsa' % WEB_RUNNER_USER
    rem_ssh_pub_cert_file = '~%s/.ssh/id_rsa.pub' % WEB_RUNNER_USER
    ssh_config_file = '~%s/.ssh/config' % WEB_RUNNER_USER

    # Install keys only when a local cert is configured AND the remote
    # authorized_keys file does not already exist (the `; true` keeps the
    # remote command's exit status zero so fabric does not abort).
    if orig_cert and not sudo('test -e %s && echo OK ; true' %
                              (rem_ssh_deploy_cert_file, )).endswith("OK"):
        sudo('mkdir -p ~%s/.ssh' % WEB_RUNNER_USER)
        sudo('chmod 700 ~%s/.ssh' % WEB_RUNNER_USER)

        # NOTE(review): these open(...).read() calls leave the file handles
        # unclosed until garbage collection.
        deploy_cert = open(LOCAL_CERT_PATH + os.sep + 'web_runner_rsa.pub', 'r').read()
        priv_cert = open(LOCAL_CERT_PATH + os.sep + 'web_runner_user_rsa', 'r').read()
        pub_cert = open(LOCAL_CERT_PATH + os.sep + 'web_runner_user_rsa.pub', 'r').read()
        ssh_config = 'Host bitbucket.org\n\tStrictHostKeyChecking no'

        # Stage the files in /tmp, then move/chmod/chown them into the
        # web-runner user's ~/.ssh with sudo.
        cuisine.file_write('/tmp/deploy_cert', deploy_cert)
        cuisine.file_write('/tmp/priv_cert', priv_cert)
        cuisine.file_write('/tmp/pub_cert', pub_cert)
        cuisine.file_write('/tmp/ssh_config', ssh_config)
        sudo('mv /tmp/deploy_cert ' + rem_ssh_deploy_cert_file)
        sudo('mv /tmp/priv_cert ' + rem_ssh_priv_cert_file)
        sudo('mv /tmp/pub_cert ' + rem_ssh_pub_cert_file)
        sudo('mv /tmp/ssh_config ' + ssh_config_file)
        sudo('chmod 600 %s' % rem_ssh_deploy_cert_file)
        sudo('chmod 600 %s' % rem_ssh_priv_cert_file)
        sudo('chmod 600 %s' % rem_ssh_pub_cert_file)
        sudo('chown -R %s:%s ~%s/.ssh/' % (WEB_RUNNER_USER, WEB_RUNNER_GROUP,
                                           WEB_RUNNER_USER))

    # Restore the original fabric credentials.
    env.user, env.password, env.key_filename = \
        orig_user, orig_passw, orig_cert
def tinc_stage3_networking(): cuisine.file_write( "/etc/tinc/%s/tinc.conf" % metadata.config["domain"], """ Name = %s Mode = switch AddressFamily = ipv4 """ % env.host_string) # # local routing # cuisine.file_write( "/etc/tinc/%s/tinc-up" % metadata.config["domain"], """#!/bin/bash VPN_BASE="%s" LOCAL_IP="${VPN_BASE}.%s" BROADCAST="${VPN_BASE}.255" NETMASK="255.255.255.0" TINC_INTERFACE="dockertinc" LOCAL_TINC_IP="%s" TINC_NETWORK="%s" TINC_BROADCAST="%s" TINC_NETMASK="%s" ifconfig "${INTERFACE}" "${LOCAL_IP}" netmask "${NETMASK}" echo 1 >/proc/sys/net/ipv4/ip_forward brctl show | grep "${TINC_INTERFACE}" || brctl addbr "${TINC_INTERFACE}" ifconfig "${TINC_INTERFACE}" "${LOCAL_TINC_IP}" netmask "${TINC_NETMASK}" ifconfig "${TINC_INTERFACE}" up """ % (metadata.config["vpn_base"], metadata.config["idx"][env.host_string], CIDR(metadata.servers[env.host_string]["dockernet"])[1], CIDR(metadata.servers[env.host_string]["dockernet"])[0], CIDR(metadata.servers[env.host_string]["dockernet"]).broadcast, CIDR(metadata.servers[env.host_string]["dockernet"]).netmask))
def tinc_stage3():
    """Run every tinc stage-3 setup step once, guarded by a lock file."""
    lock = "/tmp/.%s.lck" % sys._getframe().f_code.co_name
    if cuisine.file_exists(lock):
        return

    # Ordered pipeline of stage-3 steps.
    for step in (tinc_stage3_private_key,
                 tinc_stage3_public_keys,
                 tinc_stage3_networking,
                 tinc_stage3_networking_shutdown,
                 tinc_stage3_routing,
                 tinc_stage3_fip_routing,
                 tinc_stage3_daemon):
        step()

    cuisine.file_write(lock, "xoxo")
def put_site_conf(nginx_file, context=None):
    """Install a per-site nginx config into the versioned sites-enabled dir.

    :param nginx_file: local path of the site config (or template).
    :param context: optional template context; when given, the file is
        rendered with ``cuisine.text_template`` before writing, otherwise
        it is uploaded verbatim.
    """
    version = get_config()['version']
    if not os.path.exists(nginx_file):
        fab.abort("Nginx conf {0} not found".format(nginx_file))
    site_name = os.path.basename(nginx_file)
    install_dir = os.path.join(_INSTALL_DIR, 'nginx', version)
    conf_file = os.path.join(install_dir, 'conf', 'sites-enabled', site_name)
    if context:
        # FIX: close the template file deterministically instead of relying
        # on garbage collection to reclaim the handle.
        with open(nginx_file, 'rb') as tpl:
            tpl_content = tpl.read()
        content = cuisine.text_template(tpl_content, context)
        with cuisine.mode_sudo():
            cuisine.file_write(conf_file, content)
    else:
        with cuisine.mode_sudo():
            cuisine.file_upload(conf_file, nginx_file)
def put_conf(nginx_file):
    """Install the global nginx.conf into the versioned install dir.

    :param nginx_file: local path of the nginx.conf template; aborts if
        it does not exist.  Rendered with ``nginx_user``/``nginx_pid``
        before being written (with sudo) to ``<install>/conf/nginx.conf``.
    """
    version = get_config()['version']
    install_dir = os.path.join(_INSTALL_DIR, 'nginx', version)
    conf_file = os.path.join(install_dir, 'conf', 'nginx.conf')
    if not os.path.exists(nginx_file):
        fab.abort("Nginx conf {0} not found".format(nginx_file))
    nginx_pid = os.path.join(install_dir, 'logs', 'nginx.pid')
    context = {
        'nginx_user': NGINX_USER,
        'nginx_pid': nginx_pid,
    }
    # FIX: close the template file deterministically instead of relying
    # on garbage collection to reclaim the handle.
    with open(nginx_file, 'rb') as tpl:
        tpl_content = tpl.read()
    content = cuisine.text_template(tpl_content, context)
    with cuisine.mode_sudo():
        cuisine.file_write(conf_file, content)
def stage7_midonet_tunnelzones():
    """Create the MidoNet 'gre' and 'vtep' tunnel zones if they do not exist.

    Idempotent twice over: a lock file skips reruns, and the shell script
    only creates each zone when `tunnel-zone list` does not already show it.
    """
    metadata = Config(os.environ["CONFIGFILE"])

    # Run-once guard keyed on this function's name.
    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    run("""
if [[ "%s" == "True" ]] ; then set -x; fi

#
# create tunnel zones
#
midonet-cli -e 'tunnel-zone list name gre' | \
    grep '^tzone' | grep 'name gre type gre' || \
        midonet-cli -e 'tunnel-zone create name gre type gre'

midonet-cli -e 'tunnel-zone list name vtep' | \
    grep '^tzone' | grep 'name vtep type vtep' || \
        midonet-cli -e 'tunnel-zone create name vtep type vtep'

""" % metadata.config["debug"])

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")