def stage9_container_openstack_swift_automate_all_the_startups():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    run("""
chown -R swift:swift /srv/node
chown -R swift:swift /etc/swift
chown -R swift:swift /var/cache/swift
chown -R swift:swift /var/run/swift
""")

    run("""
service memcached restart
""")

    run("""
swift-init all stop || true

service memcached restart

echo stats | nc localhost 11211 | grep 'STAT uptime'

swift-init all start
""")
def sshconfig():
    metadata = Config(os.environ["CONFIGFILE"])

    Orizuru(metadata).sshconfig()

    # do not remove this.
    sys.exit(0)
def stage7_midonet_fakeuplinks():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    # The provider router has been created by now, so we can set up the static
    # routing logic. Note that we might also extend this role loop to include
    # compute nodes (to simulate an approach similar to HP DVR, off-ramping
    # traffic directly from the compute nodes).
    for role in ['container_midonet_gateway']:
        if role in metadata.roles:
            for container in metadata.containers:
                if container in metadata.roles[role]:
                    puts(green("setting up fakeuplink provider router leg for container %s" % container))

                    physical_ip_idx = int(re.sub(r"\D", "", container))
                    overlay_ip_idx = 255 - physical_ip_idx

                    #
                    # This is the complementary logic to what happens on the MidoNet
                    # gateways when the veth pair, the fakeuplink bridge and the eth0
                    # SNAT are set up. We might some day change this to a proper BGP
                    # peering (which will live in another container or on a different
                    # host, of course).
                    #
                    run("""
if [[ "%s" == "True" ]] ; then set -x; fi

CONTAINER_NAME="%s"
FAKEUPLINK_VETH1_IP="%s"
FAKEUPLINK_NETWORK="%s.0/24"
FAKEUPLINK_VETH0_IP="%s"

/usr/bin/expect<<EOF
set timeout 10
spawn midonet-cli
expect "midonet> " { send "cleart\r" }
expect "midonet> " { send "router list name 'MidoNet Provider Router'\r" }
expect "midonet> " { send "router router0 add port address ${FAKEUPLINK_VETH1_IP} net ${FAKEUPLINK_NETWORK}\r" }
expect "midonet> " { send "port list device router0 address ${FAKEUPLINK_VETH1_IP}\r" }
expect "midonet> " { send "host list name ${CONTAINER_NAME}\r" }
expect "midonet> " { send "host host0 add binding port router router0 port port0 interface veth1\r" }
expect "midonet> " { send "router router0 add route type normal weight 0 src 0.0.0.0/0 dst 0.0.0.0/0 gw ${FAKEUPLINK_VETH0_IP} port port0\r" }
expect "midonet> " { send "quit\r" }
EOF
""" % (
                        metadata.config["debug"],
                        container,
                        "%s.%s" % (metadata.config["fake_transfer_net"], str(overlay_ip_idx)),
                        metadata.config["fake_transfer_net"],
                        "%s.%s" % (metadata.config["fake_transfer_net"], str(physical_ip_idx))))

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
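#
# A minimal sketch (illustration only, not wired into the deployment) of the
# transfer-net addressing convention used above: the numeric suffix of the
# container name selects the host-side index, and its mirror (255 - idx)
# selects the provider-router leg, so both ends of the veth pair land in the
# same /24 without colliding. The network prefix below is hypothetical.
#
def _fakeuplink_addresses(container_name, transfer_net="172.31.255"):
    """Return (host_side_ip, provider_router_ip) for a gateway container."""
    physical_ip_idx = int(re.sub(r"\D", "", container_name))
    overlay_ip_idx = 255 - physical_ip_idx
    return ("%s.%s" % (transfer_net, physical_ip_idx),
            "%s.%s" % (transfer_net, overlay_ip_idx))

# e.g. a container named 'container_midonet_gateway_2' yields
# ('172.31.255.2', '172.31.255.253')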
def stage7_container_midonet_gateway_setup():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    server_idx = int(re.sub(r"\D", "", env.host_string))
    overlay_ip_idx = 255 - server_idx

    run("""
if [[ "%s" == "True" ]] ; then set -x; fi

#
# fakeuplink logic for midonet gateways without binding a dedicated virtual edge NIC
#
# this is recommended for silly toy installations only - do not do this in production!
#
# The idea with the veth pairs was originally introduced and explained to me by Daniel Mellado.
#
# Thanks a lot, Daniel!
#

# this will go into the host side of the veth pair
PHYSICAL_IP="%s"

# this will be bound to the provider router
OVERLAY_BINDING_IP="%s"

FIP_BASE="%s"

ip a | grep veth1 || \
    ip link add type veth # these two interfaces basically act as a virtual RJ45 cross-over patch cable

ifconfig veth0 up
ifconfig veth1 up

# this bridge brings us to the linux kernel routing
brctl addbr fakeuplink

# this is the physical ip we use for routing (SNATing inside linux)
ifconfig fakeuplink "${PHYSICAL_IP}/24" up

# this is the physical plug of the veth pair
brctl addif fakeuplink veth0

# veth1 will be used by midonet

# change this to the ext range for more authentic testing
ip route add ${FIP_BASE}.0/24 via "${OVERLAY_BINDING_IP}"

# enable routing
echo 1 > /proc/sys/net/ipv4/ip_forward
""" % (
        metadata.config["debug"],
        "%s.%s" % (metadata.config["fake_transfer_net"], str(server_idx)),
        "%s.%s" % (metadata.config["fake_transfer_net"], str(overlay_ip_idx)),
        metadata.config["fip_base"]))

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
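#
# For orientation, a sketch of the datapath the script above assembles on
# each gateway (comment only, nothing here is executed):
#
#   linux routing/SNAT      fakeuplink bridge
#   (eth0)  <------->  [ PHYSICAL_IP/24 ]  <-- veth0 ===== veth1 --> bound to the
#                                             (virtual patch cable)  provider router
#                                                                    (OVERLAY_BINDING_IP)
#
# The static route for ${FIP_BASE}.0/24 points at the overlay side, so
# floating-IP traffic enters the MidoNet overlay through the veth pair.
#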
def clean_lockfiles():
    metadata = Config(os.environ["CONFIGFILE"])

    puts(red("cleaning lockfiles from /tmp dir on %s" % env.host_string))

    run("""
rm -rfv /tmp/.*.lck
""")
def stage7_install_midonet_agent():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    puts(green("installing MidoNet agent on %s" % env.host_string))

    zk = []
    zkc = []
    for zkhost in sorted(metadata.roles['container_zookeeper']):
        zk.append("{'ip' => '%s', 'port' => '2181'}" % metadata.containers[zkhost]['ip'])
        zkc.append("%s:2181" % metadata.containers[zkhost]['ip'])

    cs = []
    csc = []
    for cshost in sorted(metadata.roles['container_cassandra']):
        cs.append("'%s'" % metadata.containers[cshost]['ip'])
        csc.append("%s" % metadata.containers[cshost]['ip'])

    args = {}
    args['zk_servers'] = "[%s]" % ",".join(zk)
    args['cassandra_seeds'] = "[%s]" % ",".join(cs)

    Puppet.apply('midonet::midonet_agent', args, metadata)

    #
    # the midolman.conf that comes with the puppet module is hopelessly broken, so we replace it here
    #
    run("""
ZK="%s"
CS="%s"
CS_COUNT="%s"

cat >/etc/midolman/midolman.conf<<EOF
[zookeeper]
zookeeper_hosts = ${ZK}
session_timeout = 30000
midolman_root_key = /midonet/v1
session_gracetime = 30000

[cassandra]
servers = ${CS}
replication_factor = ${CS_COUNT}
cluster = midonet
EOF
""" % (",".join(zkc), ",".join(csc), len(csc)))

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
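#
# For a hypothetical three-node NSDB (ZooKeeper and Cassandra sharing
# 10.0.0.11-13), the heredoc above renders /etc/midolman/midolman.conf as:
#
#   [zookeeper]
#   zookeeper_hosts = 10.0.0.11:2181,10.0.0.12:2181,10.0.0.13:2181
#   session_timeout = 30000
#   midolman_root_key = /midonet/v1
#   session_gracetime = 30000
#
#   [cassandra]
#   servers = 10.0.0.11,10.0.0.12,10.0.0.13
#   replication_factor = 3
#   cluster = midonet
#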
def install_stage5():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    Install(metadata).install()

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def stage2():
    metadata = Config(os.environ["CONFIGFILE"])

    env.warn_only = True
    env.connection_attempts = 2
    env.timeout = 2
    env.skip_bad_hosts = True

    execute(reboot_stage2)

    env.warn_only = False
def stage7_container_midonet_gateway_midonet_agent():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    stage7_install_midonet_agent()
    stage7_start_container_midonet_agent()

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def stage7_physical_openstack_compute_midonet_agent():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    stage7_install_midonet_agent()
    stage7_start_physical_midonet_agent()

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def poweroff_stage2():
    metadata = Config(os.environ["CONFIGFILE"])

    puts(red("powering down %s" % env.host_string))

    run("""
poweroff

sleep 30
""")
def stage10():
    metadata = Config(os.environ["CONFIGFILE"])

    puts(yellow("adding ssh connections to local known hosts file"))

    for server in metadata.servers:
        puts(green("connecting to %s now and adding the key" % server))
        local("ssh -o StrictHostKeyChecking=no root@%s uptime" % metadata.servers[server]["ip"])

    if 'vtep' in metadata.roles:
        execute(stage10_vtep)
        execute(stage10_container_midonet_cli_vtep)
def stage9_container_openstack_swift_check():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    run("""
source /etc/keystone/KEYSTONERC

swift list
""")
def stage7_mn_conf():
    metadata = Config(os.environ["CONFIGFILE"])

    cshosts = []
    for container in sorted(metadata.roles["container_cassandra"]):
        cshosts.append("%s:9042" % metadata.containers[container]["ip"])

    #
    # since 1.9.1 (and OSS 2015.3) all runtime config is hidden behind mn-conf
    #
    run("""
CSHOSTS="%s"
CSCOUNT="%i"

cat >/tmp/cassandra.json<<EOF
cassandra {
    servers = "${CSHOSTS}"
    replication_factor = ${CSCOUNT}
    cluster = midonet
}
EOF

mn-conf set -t default < /tmp/cassandra.json
""" % (",".join(cshosts), len(cshosts)))

    #
    # haproxy needs to be turned on for L4LB
    #
    run("""
cat >/tmp/health.json<<EOF
agent {
    "haproxy_health_monitor" {
        # zookeeper://midonet/v1/config/schemas/agent: 62
        "haproxy_file_loc"="/etc/midolman/l4lb/"
        # zookeeper://midonet/v1/config/schemas/agent: 63
        "health_monitor_enable"=true
        # zookeeper://midonet/v1/config/schemas/agent: 65
        "namespace_cleanup"=false
    }
}
EOF

mn-conf set -t default < /tmp/health.json
""")
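#
# With a hypothetical three-node Cassandra ring (10.0.0.11-13), the first
# heredoc above produces this /tmp/cassandra.json before mn-conf ingests it:
#
#   cassandra {
#       servers = "10.0.0.11:9042,10.0.0.12:9042,10.0.0.13:9042"
#       replication_factor = 3
#       cluster = midonet
#   }
#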
def stage5():
    metadata = Config(os.environ["CONFIGFILE"])

    puts(yellow("adding ssh connections to local known hosts file"))

    for server in metadata.servers:
        puts(green("connecting to %s now and adding the key" % server))
        local("ssh -o StrictHostKeyChecking=no root@%s uptime" % metadata.servers[server]["ip"])

    puts(yellow("executing stage5 configure"))
    execute(configure_stage5)

    puts(yellow("executing stage5 install"))
    execute(install_stage5)
def stage9():
    metadata = Config(os.environ["CONFIGFILE"])

    puts(yellow("adding ssh connections to local known hosts file"))

    for server in metadata.servers:
        puts(green("connecting to %s now and adding the key" % server))
        local("ssh -o StrictHostKeyChecking=no root@%s uptime" % metadata.servers[server]["ip"])

    if 'physical_openstack_compute' in metadata.roles:
        execute(stage9_container_openstack_swift)
        execute(stage9_container_openstack_swift_seed_rings)
        execute(stage9_container_openstack_swift_automate_all_the_startups)
        execute(stage9_container_openstack_swift_check)
def stage7_physical_midonet_gateway_setup():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    run("""
ip link show | grep 'state DOWN' | awk '{print $2;}' | sed 's,:,,g;' | xargs -n1 --no-run-if-empty ip link set up dev

ip a
""")

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def stage7_container_midonet_cli():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    cuisine.package_ensure([
        "python-midonetclient",
        "python-keystoneclient",
        "python-glanceclient",
        "python-novaclient",
        "python-neutronclient"])

    run("""
if [[ "%s" == "True" ]] ; then set -x; fi

#
# initialize the password cache
#
%s

API_IP="%s"
API_URI="%s"
OPENSTACK_RELEASE="%s"

source /etc/keystone/KEYSTONERC_ADMIN 2>/dev/null || source /etc/keystone/admin-openrc.sh

if [[ "kilo" == "${OPENSTACK_RELEASE}" || "liberty" == "${OPENSTACK_RELEASE}" ]]; then
    ADMIN_TENANT_ID="$(openstack project list --format csv | sed 's,",,g;' | grep -v ^ID | grep ',admin' | awk -F',' '{print $1;}' | xargs -n1 echo)"
else
    ADMIN_TENANT_ID="$(keystone tenant-list | grep admin | awk -F'|' '{print $2;}' | xargs -n1 echo)"
fi

cat >/root/.midonetrc<<EOF
[cli]
api_url = http://${API_IP}:${API_URI}
username = admin
password = ${ADMIN_PASS}
tenant = ${ADMIN_TENANT_ID}
project_id = admin
EOF
""" % (
        metadata.config["debug"],
        open(os.environ["PASSWORDCACHE"]).read(),
        metadata.containers[metadata.roles["container_midonet_api"][0]]["ip"],
        metadata.services["midonet"]["internalurl"],
        metadata.config["openstack_release"]))

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
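#
# The rendered /root/.midonetrc ends up looking like this (all values
# hypothetical; the port/path after the colon come from the midonet
# service's internalurl):
#
#   [cli]
#   api_url = http://192.0.2.10:8081/midonet-api
#   username = admin
#   password = <ADMIN_PASS from the password cache>
#   tenant = <admin tenant id>
#   project_id = admin
#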
def stage7_container_zookeeper():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    puts(green("installing zookeeper on %s" % env.host_string))

    zk = []
    zkid = 1
    myid = 1

    for zkhost in sorted(metadata.roles["container_zookeeper"]):
        zk.append("{'id' => '%s', 'host' => '%s'}" % (zkid, metadata.containers[zkhost]['ip']))

        if env.host_string == zkhost:
            # then this is our id
            myid = zkid

        zkid = zkid + 1

    args = {}
    args['servers'] = "[%s]" % ",".join(zk)
    args['server_id'] = "%s" % myid

    Puppet.apply('midonet::zookeeper', args, metadata)

    run("service zookeeper stop; service zookeeper start")

    Daemon.poll('org.apache.zookeeper.server.quorum', 600)

    for zkhost in sorted(metadata.roles['container_zookeeper']):
        run("""
IP="%s"

echo ruok | nc "${IP}" 2181 | grep imok
""" % metadata.containers[zkhost]['ip'])

    #
    # TODO status check for 'not serving requests'
    #

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
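#
# The 'echo ruok | nc' probe above speaks ZooKeeper's four-letter-word
# protocol on the client port. A minimal native sketch of the same health
# check (illustration only, not used by the stages):
#
def _zk_is_ok(host, port=2181, timeout=5):
    """Send 'ruok'; a ZooKeeper server that is up and healthy answers 'imok'."""
    import socket
    sock = socket.create_connection((host, port), timeout)
    try:
        sock.sendall(b"ruok")
        return sock.recv(4) == b"imok"
    finally:
        sock.close()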
def stage5_ping_containers():
    metadata = Config(os.environ["CONFIGFILE"])

    for container in sorted(metadata.containers):
        container_ip = metadata.containers[container]["ip"]

        run("""
IP="%s"

for i in $(seq 1 120); do
    ping -c1 "${IP}" && break
    sleep 1
done

ping -c1 "${IP}"
""" % container_ip)
def stage7_midonet_tunnelzone_members():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    cuisine.package_ensure("expect")

    for container_role in [
            'container_midonet_gateway',
            'container_openstack_compute',
            'container_openstack_neutron']:
        if container_role in metadata.roles:
            for container in metadata.containers:
                if container in metadata.roles[container_role]:
                    puts(green("adding container %s as member to tunnel zones" % container))
                    add_host_to_tunnel_zone(
                        metadata.config["debug"],
                        container,
                        metadata.containers[container]["ip"])

    for physical_role in [
            'physical_midonet_gateway',
            'physical_openstack_compute']:
        if physical_role in metadata.roles:
            for server in metadata.servers:
                if server in metadata.roles[physical_role]:
                    puts(green("adding server %s as member to tunnel zones" % server))

                    #
                    # tinc can only work with MTU 1500.
                    # we could use the approach from http://lartc.org/howto/lartc.cookbook.mtu-mss.html
                    # but instead we will disable rp_filter and use the physical interface ip
                    #
                    # server_ip = "%s.%s" % (metadata.config["vpn_base"], metadata.config["idx"][server])
                    #
                    server_ip = metadata.servers[server]["ip"]

                    add_host_to_tunnel_zone(metadata.config["debug"], server, server_ip)

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def cleanup():
    metadata = Config(os.environ["CONFIGFILE"])

    execute(fabric_docker_rm_role_containers_and_cleanup)

    for server in sorted(metadata.servers):
        for role in sorted(metadata.roles):
            if role != 'all_servers':
                if server in metadata.roles[role]:
                    local("ssh-keygen -f ${HOME}/.ssh/known_hosts -R %s_%s; true" % (server, role))
                    local("ssh-keygen -f ${HOME}/.ssh/known_hosts -R %s.%s; true" % (server, role))

    for role in sorted(metadata.roles):
        if role != 'all_servers':
            local("ssh-keygen -f ${HOME}/.ssh/known_hosts -R %s; true" % role)
def stage7():
    metadata = Config(os.environ["CONFIGFILE"])

    puts(yellow("adding ssh connections to local known hosts file"))

    for server in metadata.servers:
        puts(green("connecting to %s now and adding the key" % server))
        local("ssh -o StrictHostKeyChecking=no root@%s uptime" % metadata.servers[server]["ip"])

    #
    # network state database
    #
    execute(stage7_container_zookeeper)
    execute(stage7_container_cassandra)

    if 'physical_midonet_gateway' in metadata.roles:
        execute(stage7_physical_midonet_gateway_midonet_agent)
        execute(stage7_physical_midonet_gateway_setup)

    if 'container_midonet_gateway' in metadata.roles:
        execute(stage7_container_midonet_gateway_midonet_agent)
        execute(stage7_container_midonet_gateway_setup)

    if 'physical_openstack_compute' in metadata.roles:
        execute(stage7_physical_openstack_compute_midonet_agent)

    if 'container_openstack_compute' in metadata.roles:
        execute(stage7_container_openstack_compute_midonet_agent)

    execute(stage7_container_openstack_neutron_midonet_agent)

    execute(stage7_container_midonet_api)
    execute(stage7_container_midonet_manager)
    execute(stage7_container_midonet_cli)

    execute(stage7_midonet_tunnelzones)
    execute(stage7_midonet_tunnelzone_members)

    execute(stage7_neutron_networks)
    execute(stage7_midonet_fakeuplinks)

    execute(stage7_test_connectivity)
def stage10_container_midonet_cli_vtep():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    cuisine.package_ensure("expect")

    compute_ip = "%s.%s" % (
        metadata.config["vpn_base"],
        metadata.config["idx"][metadata.roles["vtep"][0]])

    #
    # set up the connection to the vtep and set up the binding inside midonet-cli
    #
    run("""
IP="%s"
PORT="%s"

/usr/bin/expect<<EOF
set timeout 10
spawn midonet-cli
expect "midonet> " { send "tunnel-zone list name vtep\r" }
expect "midonet> " { send "vtep add management-ip ${IP} management-port 6262 tunnel-zone tzone0\r" }
expect "midonet> " { send "quit\r" }
EOF

sleep 10

ID="$(midonet-cli -e 'list bridge name internal' | awk '{print $2;}')"

/usr/bin/expect<<EOF
set timeout 10
spawn midonet-cli
expect "midonet> " { send "vtep management-ip ${IP} binding add network-id ${ID} physical-port ${PORT} vlan 0\r" }
expect "midonet> " { send "quit\r" }
EOF
""" % (compute_ip, metadata.config["vtep_port"]))
def stage7_midonet_tunnelzones():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    run("""
if [[ "%s" == "True" ]] ; then set -x; fi

#
# create tunnel zones
#
midonet-cli -e 'tunnel-zone list name gre' | \
    grep '^tzone' | grep 'name gre type gre' || \
    midonet-cli -e 'tunnel-zone create name gre type gre'

midonet-cli -e 'tunnel-zone list name vtep' | \
    grep '^tzone' | grep 'name vtep type vtep' || \
    midonet-cli -e 'tunnel-zone create name vtep type vtep'
""" % metadata.config["debug"])

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
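#
# The list-grep-create pipeline above is the generic idempotency pattern for
# midonet-cli: list, grep for the expected line, create only on a miss. A
# hypothetical helper capturing the pattern for reuse (a sketch, not wired
# into any stage):
#
def _ensure_tunnel_zone(name, zone_type):
    """Create a tunnel zone only if no zone with that name/type exists yet."""
    run("""
midonet-cli -e 'tunnel-zone list name %(name)s' | \
    grep '^tzone' | grep 'name %(name)s type %(type)s' || \
    midonet-cli -e 'tunnel-zone create name %(name)s type %(type)s'
""" % {"name": name, "type": zone_type})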
def stage7_container_cassandra():
    metadata = Config(os.environ["CONFIGFILE"])

    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    puts(green("installing cassandra on %s" % env.host_string))

    cs = []
    for cshost in metadata.roles['container_cassandra']:
        cs.append("'%s'" % metadata.containers[cshost]['ip'])

    args = {}
    args['seeds'] = "[%s]" % ",".join(cs)
    args['seed_address'] = "'%s'" % metadata.containers[env.host_string]['ip']

    Puppet.apply('midonet::cassandra', args, metadata)

    Daemon.poll('org.apache.cassandra.service.CassandraDaemon', 600)

    cuisine.file_write("/tmp/.%s.lck" % sys._getframe().f_code.co_name, "xoxo")
def cleancontainerlocks():
    metadata = Config(os.environ["CONFIGFILE"])

    execute(clean_lockfiles_from_containers)
def fabric_docker_rm_role_containers_and_cleanup():
    metadata = Config(os.environ["CONFIGFILE"])

    for container in sorted(metadata.containers):
        if env.host_string == metadata.containers[container]["server"]:
            puts(yellow("destroying container %s on %s" % (container, env.host_string)))
            run("""
SERVER_NAME="%s"
CONTAINER_ROLE="%s"

TEMPLATE_NAME="template_${SERVER_NAME}"

for CONTAINER in $(docker ps | grep "${CONTAINER_ROLE}_${SERVER_NAME}" | awk '{print $1;}' | grep -v CONTAINER); do
    docker kill $CONTAINER || true;
    docker rm -f $CONTAINER || true;
done

docker images | grep "${TEMPLATE_NAME}" && docker rmi -f "${TEMPLATE_NAME}" || true;

rm -fv /var/run/netns/docker_*_"${SERVER_NAME}"
""" % (env.host_string, container))

    run("""
rm -fv /etc/haproxy/haproxy.cfg
rm -fv /etc/newrelic/nrsysmond.cfg
rm -fv /etc/apt/sources.list.d/cloudarchive*
rm -fv /etc/apt/sources.list.d/newrelic*
rm -fv /etc/apt/sources.list.d/mido*
# apt-get update 1>/dev/null
""")

    puts(red("destroying all virsh images"))
    run("""
virsh list --all | grep instance | awk '{print $2;}' | xargs -n1 --no-run-if-empty virsh destroy || echo
virsh list --all | grep instance | awk '{print $2;}' | xargs -n1 --no-run-if-empty virsh undefine || echo
exit 0
""")

    puts(red("destroying all containers"))
    run("""
DOMAIN="%s"

/etc/init.d/openvswitch-switch stop
/etc/init.d/openvswitch-vtep stop

rm -f /etc/openvswitch/vtep.db
rm -f /etc/openvswitch/conf.db

rm -rfv /etc/rc.local.d
mkdir -pv /etc/rc.local.d

rm -rf "/etc/tinc/${DOMAIN}"
mkdir -pv "/etc/tinc/${DOMAIN}/hosts"

pidof tincd | xargs -n1 --no-run-if-empty kill -9

ifconfig dockertinc down || true
brctl delbr dockertinc || true

iptables -t nat --flush
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -P OUTPUT ACCEPT
iptables --flush

docker ps --no-trunc -aq | xargs -n1 --no-run-if-empty docker rm -f

docker images | grep '^<none>' | awk '{print $3}' | xargs -n1 --no-run-if-empty docker rmi -f

#
# this will restore the iptables NAT rules for docker build
#
service docker.io restart

rm -fv /etc/newrelic/nrsysmond.cfg

sync

exit 0
""" % metadata.config["domain"])
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import re
import sys

import cuisine

from orizuru.config import Config
# Orizuru, Install, Puppet and Daemon are provided by the project's own
# modules; their exact import paths are not shown in this excerpt.

from fabric.api import *
from fabric.operations import reboot
from fabric.colors import green, red, yellow
from fabric.utils import puts

Config(os.environ["CONFIGFILE"])


@parallel
@roles('all_servers')
def wifi():
    run("""
ifconfig wlan0 up
iwconfig wlan0 essid Funkturm
dhclient wlan0
ping -c3 192.168.4.1
""")
def zonefile():
    metadata = Config(os.environ["CONFIGFILE"])

    Orizuru(metadata).zonefile()

    sys.exit(0)