def copy_apiserver_ssl_certs_to_node(*nodes):
    ssl_certs = (get_apiserver_certfile(), get_apiserver_cafile(),
                 get_apiserver_keyfile(), get_apiserver_cert_bundle())
    cfgm_host = env.roledefs['cfgm'][0]
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert in ssl_certs:
                cert_file = '/etc/contrail/ssl/certs/%s' % os.path.basename(ssl_cert)
                if ssl_cert.endswith('.key'):
                    cert_file = '/etc/contrail/ssl/private/%s' % os.path.basename(ssl_cert)
                if node not in env.roledefs['cfgm']:
                    # Clear old certificate
                    sudo('rm -f %s' % cert_file)
                if exists(cert_file, use_sudo=True):
                    continue
                with settings(host_string=cfgm_host,
                              password=get_env_passwords(cfgm_host)):
                    tmp_dir = tempfile.mkdtemp()
                    tmp_fname = os.path.join(tmp_dir, os.path.basename(ssl_cert))
                    get_as_sudo(ssl_cert, tmp_fname)
                sudo("mkdir -p /etc/contrail/ssl/certs/")
                sudo("mkdir -p /etc/contrail/ssl/private/")
                put(tmp_fname, cert_file, use_sudo=True)
                os.remove(tmp_fname)
            with settings(warn_only=True):
                sudo("chown -R contrail:contrail /etc/contrail/ssl")
def setup_keystone_ssl_certs_node(*nodes):
    default_certfile = '/etc/keystone/ssl/certs/keystone.pem'
    default_keyfile = '/etc/keystone/ssl/private/keystone.key'
    default_cafile = '/etc/keystone/ssl/certs/keystone_ca.pem'
    keystonecertbundle = get_keystone_cert_bundle()
    ssl_certs = ((get_keystone_certfile(), default_certfile),
                 (get_keystone_keyfile(), default_keyfile),
                 (get_keystone_cafile(), default_cafile))
    index = env.roledefs['openstack'].index(env.host_string) + 1
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    # Clear old certificate
                    sudo('rm -f %s' % ssl_cert)
                    sudo('rm -f %s' % keystonecertbundle)
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    openstack_host = env.roledefs['openstack'][0]
                    if index == 1:
                        if not exists(ssl_cert, use_sudo=True):
                            print "Creating keystone SSL certs in first openstack node"
                            subject_alt_names_mgmt = [hstr_to_ip(host)
                                                      for host in env.roledefs['openstack']]
                            subject_alt_names_ctrl = [hstr_to_ip(get_control_host_string(host))
                                                      for host in env.roledefs['openstack']]
                            subject_alt_names = subject_alt_names_mgmt + subject_alt_names_ctrl
                            if get_openstack_external_vip():
                                subject_alt_names.append(get_openstack_external_vip())
                            sudo('create-keystone-ssl-certs.sh %s %s' % (
                                 get_openstack_internal_vip() or
                                 hstr_to_ip(get_control_host_string(openstack_host)),
                                 ','.join(subject_alt_names)))
                    else:
                        with settings(host_string=openstack_host,
                                      password=get_env_passwords(openstack_host)):
                            while not exists(ssl_cert, use_sudo=True):
                                print "Wait for SSL certs to be created in first openstack"
                                sleep(0.1)
                            print "Get SSL cert(%s) from first openstack" % ssl_cert
                            tmp_dir = tempfile.mkdtemp()
                            tmp_fname = os.path.join(tmp_dir, os.path.basename(ssl_cert))
                            get_as_sudo(ssl_cert, tmp_fname)
                        print "Copy to this(%s) openstack node" % env.host_string
                        sudo('mkdir -p /etc/keystone/ssl/certs/')
                        sudo('mkdir -p /etc/keystone/ssl/private/')
                        put(tmp_fname, ssl_cert, use_sudo=True)
                        os.remove(tmp_fname)
                elif os.path.isfile(ssl_cert):
                    print "Certificate (%s) exists locally" % ssl_cert
                    put(ssl_cert, default, use_sudo=True)
                elif exists(ssl_cert, use_sudo=True):
                    print "Certificate (%s) exists in openstack node" % ssl_cert
                else:
                    raise RuntimeError("%s doesn't exist locally or in the openstack node"
                                       % ssl_cert)
            if not exists(keystonecertbundle, use_sudo=True):
                ((certfile, _), (keyfile, _), (cafile, _)) = ssl_certs
                sudo('cat %s %s > %s' % (certfile, cafile, keystonecertbundle))
            sudo("chown -R keystone:keystone /etc/keystone/ssl")
def copy_apiserver_ssl_certs_to_node(*nodes):
    ssl_certs = (get_apiserver_certfile(), get_apiserver_cafile(),
                 get_apiserver_keyfile(), get_apiserver_cert_bundle())
    cfgm_host = env.roledefs['cfgm'][0]
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert in ssl_certs:
                cert_file = '/etc/contrail/ssl/certs/%s' % os.path.basename(ssl_cert)
                if ssl_cert.endswith('.key'):
                    cert_file = '/etc/contrail/ssl/private/%s' % os.path.basename(ssl_cert)
                if node not in env.roledefs['cfgm']:
                    # Clear old certificate
                    sudo('rm -f %s' % cert_file)
                if exists(cert_file, use_sudo=True):
                    continue
                with settings(host_string=cfgm_host,
                              password=get_env_passwords(cfgm_host)):
                    tmp_fname = os.path.join('/tmp', os.path.basename(ssl_cert))
                    get_as_sudo(ssl_cert, tmp_fname)
                sudo("mkdir -p /etc/contrail/ssl/certs/")
                sudo("mkdir -p /etc/contrail/ssl/private/")
                put(tmp_fname, cert_file, use_sudo=True)
                os.remove(tmp_fname)
            with settings(warn_only=True):
                sudo("chown -R contrail:contrail /etc/contrail/ssl")
def fixup_restart_haproxy_in_collector_node(*args):
    contrail_analytics_api_server_lines = ''
    space = ' ' * 3
    for host_string in env.roledefs['collector']:
        server_index = env.roledefs['collector'].index(host_string) + 1
        mgmt_host_ip = hstr_to_ip(host_string)
        host_ip = hstr_to_ip(get_control_host_string(host_string))
        contrail_analytics_api_server_lines += \
            '%s server %s %s:9081 check inter 2000 rise 2 fall 3\n' \
            % (space, host_ip, host_ip)
    for host_string in env.roledefs['collector']:
        haproxy_config = collector_haproxy.template.safe_substitute({
            '__contrail_analytics_api_backend_servers__': contrail_analytics_api_server_lines,
            '__contrail_hap_user__': 'haproxy',
            '__contrail_hap_passwd__': 'contrail123',
        })
    for host_string in args:
        with settings(host_string=host_string):
            # chop old settings including pesky default from pkg...
            tmp_fname = "/tmp/haproxy-%s-config" % (host_string)
            get_as_sudo("/etc/haproxy/haproxy.cfg", tmp_fname)
            with settings(warn_only=True):
                local("sed -i -e '/^#contrail-collector-marker-start/,/^#contrail-collector-marker-end/d' %s" % (tmp_fname))
                local("sed -i -e 's/frontend\s*main\s*\*:5000/frontend main *:5001/' %s" % (tmp_fname))
                local("sed -i -e 's/ssl-relay 0.0.0.0:8443/ssl-relay 0.0.0.0:5002/' %s" % (tmp_fname))
                local("sed -i -e 's/option\shttplog/option tcplog/' %s" % (tmp_fname))
                local("sed -i -e 's/maxconn 4096/maxconn 100000/' %s" % (tmp_fname))
                # Remove default HA config
                local("sed -i '/listen\sappli1-rewrite/,/rspidel/d' %s" % tmp_fname)
                local("sed -i '/listen\sappli3-relais/,/rspidel/d' %s" % tmp_fname)
            # ...generate new ones
            cfg_file = open(tmp_fname, 'a')
            cfg_file.write(haproxy_config)
            cfg_file.close()
            put(tmp_fname, "/etc/haproxy/haproxy.cfg", use_sudo=True)
            local("rm %s" % (tmp_fname))
        # haproxy enable
        with settings(host_string=host_string, warn_only=True):
            sudo("chkconfig haproxy on")
            enable_haproxy()
            sudo("service haproxy restart")
def sync_keystone_ssl_certs_node(*args):
    for host_string in args:
        temp_dir = tempfile.mkdtemp()
        with settings(host_string=env.roledefs['openstack'][0],
                      password=get_env_passwords(env.roledefs['openstack'][0])):
            get_as_sudo('/etc/keystone/ssl/', temp_dir)
        with settings(host_string=host_string,
                      password=get_env_passwords(host_string)):
            put('%s/ssl/' % temp_dir, '/etc/keystone/', use_sudo=True)
            sudo('service keystone restart')
def sync_keystone_ssl_certs():
    host_string = env.host_string
    temp_dir = tempfile.mkdtemp()
    with settings(host_string=env.roledefs['openstack'][0],
                  password=env.passwords[env.roledefs['openstack'][0]]):
        get_as_sudo('/etc/keystone/ssl/', temp_dir)
    with settings(host_string=host_string, password=env.passwords[host_string]):
        put('%s/ssl/' % temp_dir, '/etc/keystone/', use_sudo=True)
        sudo('service keystone restart')
def sync_keystone_ssl_certs():
    host_string = env.host_string
    temp_dir = tempfile.mkdtemp()
    with settings(host_string=env.roledefs["openstack"][0],
                  password=get_env_passwords(env.roledefs["openstack"][0])):
        get_as_sudo("/etc/keystone/ssl/", temp_dir)
    with settings(host_string=host_string, password=get_env_passwords(host_string)):
        put("%s/ssl/" % temp_dir, "/etc/keystone/", use_sudo=True)
        sudo("service keystone restart")
def sync_keystone_ssl_certs():
    host_string = env.host_string
    temp_dir = tempfile.mkdtemp()
    with settings(host_string=env.roledefs['openstack'][0],
                  password=env.passwords[env.roledefs['openstack'][0]]):
        get_as_sudo('/etc/keystone/ssl/', temp_dir)
    with settings(host_string=host_string, password=env.passwords[host_string]):
        put('%s/ssl/' % temp_dir, '/etc/keystone/', use_sudo=True)
        sudo('service keystone restart')
def validate_and_copy_file(filename, source_host):
    with settings(host_string='%s' % (source_host), warn_only=True,
                  abort_on_prompts=False):
        if exists(filename):
            filedir = os.path.dirname(filename)
            if not os.path.exists(filedir):
                os.makedirs(filedir)
            get_as_sudo(filename, filename)
            return filename
        return ""
def copy_vnc_api_lib_ini_to_node(*nodes):
    vnc_api_lib = '/etc/contrail/vnc_api_lib.ini'
    cfgm_host = env.roledefs['cfgm'][0]
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            with settings(host_string=cfgm_host,
                          password=get_env_passwords(cfgm_host)):
                tmp_fname = os.path.join('/tmp', os.path.basename(vnc_api_lib))
                get_as_sudo(vnc_api_lib, tmp_fname)
            put(tmp_fname, vnc_api_lib, use_sudo=True)
def copy_vnc_api_lib_ini_to_node(*nodes):
    vnc_api_lib = '/etc/contrail/vnc_api_lib.ini'
    cfgm_host = env.roledefs['cfgm'][0]
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            with settings(host_string=cfgm_host,
                          password=get_env_passwords(cfgm_host)):
                tmp_dir = tempfile.mkdtemp()
                tmp_fname = os.path.join(tmp_dir, os.path.basename(vnc_api_lib))
                get_as_sudo(vnc_api_lib, tmp_fname)
            put(tmp_fname, vnc_api_lib, use_sudo=True)
def fixup_restart_haproxy_in_collector_node(*args): contrail_analytics_api_server_lines = "" space = " " * 3 for host_string in env.roledefs["collector"]: server_index = env.roledefs["collector"].index(host_string) + 1 mgmt_host_ip = hstr_to_ip(host_string) host_ip = hstr_to_ip(get_control_host_string(host_string)) contrail_analytics_api_server_lines += "%s server %s %s:9081 check inter 2000 rise 2 fall 3\n" % ( space, host_ip, host_ip, ) for host_string in env.roledefs["collector"]: haproxy_config = collector_haproxy.template.safe_substitute( { "__contrail_analytics_api_backend_servers__": contrail_analytics_api_server_lines, "__contrail_hap_user__": "haproxy", "__contrail_hap_passwd__": "contrail123", } ) for host_string in args: with settings(host_string=host_string): # chop old settings including pesky default from pkg... tmp_fname = "/tmp/haproxy-%s-config" % (host_string) get_as_sudo("/etc/haproxy/haproxy.cfg", tmp_fname) with settings(warn_only=True): local( "sed -i -e '/^#contrail-collector-marker-start/,/^#contrail-collector-marker-end/d' %s" % (tmp_fname) ) local("sed -i -e 's/frontend\s*main\s*\*:5000/frontend main *:5001/' %s" % (tmp_fname)) local("sed -i -e 's/ssl-relay 0.0.0.0:8443/ssl-relay 0.0.0.0:5002/' %s" % (tmp_fname)) local("sed -i -e 's/option\shttplog/option tcplog/' %s" % (tmp_fname)) local("sed -i -e 's/maxconn 4096/maxconn 100000/' %s" % (tmp_fname)) # Remove default HA config local("sed -i '/listen\sappli1-rewrite/,/rspidel/d' %s" % tmp_fname) local("sed -i '/listen\sappli3-relais/,/rspidel/d' %s" % tmp_fname) # ...generate new ones cfg_file = open(tmp_fname, "a") cfg_file.write(haproxy_config) cfg_file.close() put(tmp_fname, "/etc/haproxy/haproxy.cfg", use_sudo=True) local("rm %s" % (tmp_fname)) # haproxy enable with settings(host_string=host_string, warn_only=True): sudo("chkconfig haproxy on") enable_haproxy() sudo("service haproxy restart")
def setup_apiserver_ssl_certs_node(*nodes):
    default_certfile = '/etc/contrail/ssl/certs/contrail.pem'
    default_keyfile = '/etc/contrail/ssl/private/contrail.key'
    default_cafile = '/etc/contrail/ssl/certs/contrail_ca.pem'
    contrailcertbundle = get_apiserver_cert_bundle()
    ssl_certs = ((get_apiserver_certfile(), default_certfile),
                 (get_apiserver_keyfile(), default_keyfile),
                 (get_apiserver_cafile(), default_cafile))
    index = env.roledefs['cfgm'].index(env.host_string) + 1
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    # Clear old certificate
                    sudo('rm -f %s' % ssl_cert)
                    sudo('rm -f %s' % contrailcertbundle)
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    cfgm_host = env.roledefs['cfgm'][0]
                    if index == 1:
                        if not exists(ssl_cert, use_sudo=True):
                            print "Creating apiserver SSL certs in first cfgm node"
                            cfgm_ip = get_contrail_internal_vip() or \
                                hstr_to_ip(get_control_host_string(cfgm_host))
                            sudo('create-api-ssl-certs.sh %s' % cfgm_ip)
                    else:
                        with settings(host_string=cfgm_host,
                                      password=get_env_passwords(cfgm_host)):
                            while not exists(ssl_cert, use_sudo=True):
                                print "Wait for SSL certs to be created in first cfgm"
                                sleep(0.1)
                            print "Get SSL cert(%s) from first cfgm" % ssl_cert
                            tmp_dir = tempfile.mkdtemp()
                            tmp_fname = os.path.join(tmp_dir, os.path.basename(ssl_cert))
                            get_as_sudo(ssl_cert, tmp_fname)
                        print "Copy to this(%s) cfgm node" % env.host_string
                        sudo('mkdir -p /etc/contrail/ssl/certs/')
                        sudo('mkdir -p /etc/contrail/ssl/private/')
                        put(tmp_fname, ssl_cert, use_sudo=True)
                        os.remove(tmp_fname)
                elif os.path.isfile(ssl_cert):
                    print "Certificate (%s) exists locally" % ssl_cert
                    put(ssl_cert, default, use_sudo=True)
                elif exists(ssl_cert, use_sudo=True):
                    print "Certificate (%s) exists in cfgm node" % ssl_cert
                else:
                    raise RuntimeError("%s doesn't exist locally or in the cfgm node"
                                       % ssl_cert)
            if not exists(contrailcertbundle, use_sudo=True):
                ((certfile, _), (keyfile, _), (cafile, _)) = ssl_certs
                sudo('cat %s %s > %s' % (certfile, cafile, contrailcertbundle))
            sudo("chown -R contrail:contrail /etc/contrail/ssl")
def setup_apiserver_ssl_certs_node(*nodes):
    default_certfile = '/etc/contrail/ssl/certs/contrail.pem'
    default_keyfile = '/etc/contrail/ssl/private/contrail.key'
    default_cafile = '/etc/contrail/ssl/certs/contrail_ca.pem'
    contrailcertbundle = get_apiserver_cert_bundle()
    ssl_certs = ((get_apiserver_certfile(), default_certfile),
                 (get_apiserver_keyfile(), default_keyfile),
                 (get_apiserver_cafile(), default_cafile))
    index = env.roledefs['cfgm'].index(env.host_string) + 1
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    # Clear old certificate
                    sudo('rm -f %s' % ssl_cert)
                    sudo('rm -f %s' % contrailcertbundle)
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    cfgm_host = env.roledefs['cfgm'][0]
                    if index == 1:
                        if not exists(ssl_cert, use_sudo=True):
                            print "Creating apiserver SSL certs in first cfgm node"
                            cfgm_ip = get_contrail_internal_vip() or hstr_to_ip(cfgm_host)
                            sudo('create-api-ssl-certs.sh %s' % cfgm_ip)
                    else:
                        with settings(host_string=cfgm_host,
                                      password=get_env_passwords(cfgm_host)):
                            while not exists(ssl_cert, use_sudo=True):
                                print "Wait for SSL certs to be created in first cfgm"
                                sleep(0.1)
                            print "Get SSL cert(%s) from first cfgm" % ssl_cert
                            tmp_fname = os.path.join('/tmp', os.path.basename(ssl_cert))
                            get_as_sudo(ssl_cert, tmp_fname)
                        print "Copy to this(%s) cfgm node" % env.host_string
                        put(tmp_fname, ssl_cert, use_sudo=True)
                        os.remove(tmp_fname)
                elif os.path.isfile(ssl_cert):
                    print "Certificate (%s) exists locally" % ssl_cert
                    put(ssl_cert, default, use_sudo=True)
                elif exists(ssl_cert, use_sudo=True):
                    print "Certificate (%s) exists in cfgm node" % ssl_cert
                else:
                    raise RuntimeError("%s doesn't exist locally or in the cfgm node"
                                       % ssl_cert)
            if not exists(contrailcertbundle, use_sudo=True):
                ((certfile, _), (keyfile, _), (cafile, _)) = ssl_certs
                sudo('cat %s %s > %s' % (certfile, cafile, contrailcertbundle))
            sudo("chown -R contrail:contrail /etc/contrail/ssl")
def copy_keystone_ssl_key_to_node(*nodes):
    ssl_key = get_keystone_keyfile()
    openstack_host = env.roledefs['openstack'][0]
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            key_file = '/etc/contrail/ssl/private/%s' % os.path.basename(ssl_key)
            # Clear old key
            sudo('rm -f %s' % key_file)
            with settings(host_string=openstack_host,
                          password=get_env_passwords(openstack_host)):
                tmp_dir = tempfile.mkdtemp()
                tmp_fname = os.path.join(tmp_dir, os.path.basename(ssl_key))
                get_as_sudo(ssl_key, tmp_fname)
            sudo("mkdir -p /etc/contrail/ssl/private/")
            put(tmp_fname, key_file, use_sudo=True)
            os.remove(tmp_fname)
            sudo("chown -R contrail:contrail /etc/contrail/ssl/private")
def copy_keystone_ssl_certs_to_node(*nodes):
    ssl_certs = (get_keystone_certfile(), get_keystone_cafile())
    openstack_host = env.roledefs['openstack'][0]
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert in ssl_certs:
                cert_file = '/etc/contrail/ssl/certs/%s' % os.path.basename(ssl_cert)
                # Clear old certificate
                sudo('rm -f %s' % cert_file)
                with settings(host_string=openstack_host,
                              password=get_env_passwords(openstack_host)):
                    tmp_fname = os.path.join('/tmp', os.path.basename(ssl_cert))
                    get_as_sudo(ssl_cert, tmp_fname)
                sudo("mkdir -p /etc/contrail/ssl/certs/")
                put(tmp_fname, cert_file, use_sudo=True)
                os.remove(tmp_fname)
            sudo("chown -R contrail:contrail /etc/contrail/ssl")
def setup_keystone_ssl_certs_node(*nodes):
    default_certfile = '/etc/keystone/ssl/certs/keystone.pem'
    default_keyfile = '/etc/keystone/ssl/private/keystone.key'
    default_cafile = '/etc/keystone/ssl/certs/keystone_ca.pem'
    ssl_certs = ((get_keystone_certfile(), default_certfile),
                 (get_keystone_keyfile(), default_keyfile),
                 (get_keystone_cafile(), default_cafile))
    index = env.roledefs['openstack'].index(env.host_string) + 1
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    # Clear old certificate
                    sudo('rm -f %s' % ssl_cert)
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    openstack_host = env.roledefs['openstack'][0]
                    if index == 1:
                        if not exists(ssl_cert, use_sudo=True):
                            print "Creating keystone SSL certs in first openstack node"
                            sudo('create-keystone-ssl-certs.sh %s' % (
                                 get_openstack_internal_vip() or
                                 hstr_to_ip(openstack_host)))
                    else:
                        with settings(host_string=openstack_host,
                                      password=get_env_passwords(openstack_host)):
                            while not exists(ssl_cert, use_sudo=True):
                                print "Wait for SSL certs to be created in first openstack"
                                sleep(0.1)
                            print "Get SSL cert(%s) from first openstack" % ssl_cert
                            tmp_fname = os.path.join('/tmp', os.path.basename(ssl_cert))
                            get_as_sudo(ssl_cert, tmp_fname)
                        print "Copy to this(%s) openstack node" % env.host_string
                        put(tmp_fname, ssl_cert, use_sudo=True)
                        os.remove(tmp_fname)
                elif os.path.isfile(ssl_cert):
                    print "Certificate (%s) exists locally" % ssl_cert
                    put(ssl_cert, default, use_sudo=True)
                elif exists(ssl_cert, use_sudo=True):
                    print "Certificate (%s) exists in openstack node" % ssl_cert
                else:
                    raise RuntimeError("%s doesn't exist locally or in the openstack node"
                                       % ssl_cert)
            sudo("chown -R keystone:keystone /etc/keystone/ssl")
def copy_keystone_ssl_certs_to_node(*nodes):
    ssl_certs = (get_keystone_certfile(), get_keystone_cafile())
    openstack_host = env.roledefs['openstack'][0]
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert in ssl_certs:
                cert_file = '/etc/contrail/ssl/certs/%s' % os.path.basename(ssl_cert)
                # Clear old certificate
                sudo('rm -f %s' % cert_file)
                with settings(host_string=openstack_host,
                              password=get_env_passwords(openstack_host)):
                    tmp_dir = tempfile.mkdtemp()
                    tmp_fname = os.path.join(tmp_dir, os.path.basename(ssl_cert))
                    get_as_sudo(ssl_cert, tmp_fname)
                sudo("mkdir -p /etc/contrail/ssl/certs/")
                put(tmp_fname, cert_file, use_sudo=True)
                os.remove(tmp_fname)
            sudo("chown -R contrail:contrail /etc/contrail/ssl")
def zookeeper_rolling_restart(): zoo_cfg = "/etc/zookeeper/conf/zoo.cfg" cfgm_nodes = copy.deepcopy(env.roledefs['cfgm']) database_nodes = copy.deepcopy(env.roledefs['database']) zookeeper_status = verfiy_zookeeper(*database_nodes) if (len(database_nodes) % 2) != 1: print "Recommended to run odd number of zookeeper(database) nodes." print "Add a new node to the existing clusters testbed,py and install contrail-install-packages in it.\n\ Installing/Provisioning will be done as part of Upgrade" exit(0) if cfgm_nodes == database_nodes: print "No need for rolling restart." if (len(database_nodes) > 1 and 'leader' in zookeeper_status.values() and 'follower' in zookeeper_status.values() and 'notrunning' not in zookeeper_status.values() and 'notinstalled' not in zookeeper_status.values() and 'standalone' not in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is already formed properly." return elif (len(database_nodes) == 1 and 'notinstalled' not in zookeeper_status.values() and 'standalone' in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is already formed properly." return execute('stop_cfgm') execute('backup_zookeeper_database') old_nodes = list(set(cfgm_nodes).difference(set(database_nodes))) new_nodes = list(set(database_nodes).difference(set(cfgm_nodes))) for new_node in new_nodes: zk_index = (database_nodes.index(new_node) + len(cfgm_nodes) + 1) with settings(host_string=new_node, password=get_env_passwords(new_node)): pdist = detect_ostype() print "Install zookeeper in the new node." execute('create_install_repo_node', new_node) remove_package(['supervisor'], pdist) upgrade_package(['python-contrail', 'contrail-openstack-database', 'zookeeper'], pdist) if pdist in ['ubuntu']: sudo("ln -sf /bin/true /sbin/chkconfig") sudo("chkconfig zookeeper on") print "Fix zookeeper configs" sudo("sudo sed 's/^#log4j.appender.ROLLINGFILE.MaxBackupIndex=/log4j.appender.ROLLINGFILE.MaxBackupIndex=/g' /etc/zookeeper/conf/log4j.properties > log4j.properties.new") sudo("sudo mv log4j.properties.new /etc/zookeeper/conf/log4j.properties") if pdist in ['centos']: sudo('echo export ZOO_LOG4J_PROP="INFO,CONSOLE,ROLLINGFILE" >> /usr/lib/zookeeper/bin/zkEnv.sh') if pdist in ['ubuntu']: sudo('echo ZOO_LOG4J_PROP="INFO,CONSOLE,ROLLINGFILE" >> /etc/zookeeper/conf/environment') print "put cluster-unique zookeeper's instance id in myid" sudo('sudo echo "%s" > /var/lib/zookeeper/myid' % (zk_index)) print "Add new nodes to existing zookeeper quorum" with settings(host_string=cfgm_nodes[0], password=get_env_passwords(cfgm_nodes[0])): for new_node in new_nodes: zk_index = (database_nodes.index(new_node) + len(cfgm_nodes) + 1) sudo('echo "server.%d=%s:2888:3888" >> %s' % (zk_index, hstr_to_ip(new_node), zoo_cfg)) tmp_dir= tempfile.mkdtemp() get_as_sudo(zoo_cfg, tmp_dir) print "Restart zookeeper in all nodes to make new nodes join zookeeper quorum" for zookeeper_node in cfgm_nodes + new_nodes: with settings(host_string=zookeeper_node, password=get_env_passwords(zookeeper_node)): put(tmp_dir+'/zoo.cfg', zoo_cfg, use_sudo=True) print "Start Zookeeper in new database node" execute('restart_zookeeper') print "Waiting 5 seconds for the new nodes in the zookeeper quorum to be synced." 
sleep(5) print "Shutdown old nodes one by one and also make sure leader/follower election is complete after each shut downs" zoo_nodes = cfgm_nodes + database_nodes for old_node in old_nodes: zoo_nodes.remove(old_node) with settings(host_string=old_node, password=get_env_passwords(old_node)): print "Stop Zookeeper in old cfgm node" execute('stop_zookeeper') for zoo_node in zoo_nodes: with settings(host_string=zoo_node, password=get_env_passwords(zoo_node)): sudo("sed -i '/^server.*%s:2888:3888/d' %s" % (hstr_to_ip(zoo_node), zoo_cfg)) retries = 3 while retries: zookeeper_status = verfiy_zookeeper(*zoo_nodes) if (len(zoo_nodes) > 1 and 'leader' in zookeeper_status.values() and 'follower' in zookeeper_status.values() and 'notrunning' not in zookeeper_status.values() and 'notinstalled' not in zookeeper_status.values() and 'standalone' not in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is formed properly." break elif (len(zoo_nodes) == 1 and 'notinstalled' not in zookeeper_status.values() and 'standalone' in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is formed properly." break else: retries -= 1 if retries: for zoo_node in zoo_nodes: with settings(host_string=zoo_node, password=get_env_passwords(zoo_node)): execute('restart_zookeeper') continue print "Zookeeper quorum is not formed. Fix it and retry upgrade" print zookeeper_status exit(1) print "Correct the server id in zoo.cfg for the new nodes in the zookeeper quorum" with settings(host_string=database_nodes[0], password=get_env_passwords(database_nodes[0])): sudo("sed -i '/^server.*3888/d' %s" % zoo_cfg) for zookeeper_node in database_nodes: zk_index = (database_nodes.index(zookeeper_node) + 1) sudo('echo "server.%d=%s:2888:3888" >> %s' % (zk_index, hstr_to_ip(zookeeper_node), zoo_cfg)) tmp_dir= tempfile.mkdtemp() get_as_sudo(zoo_cfg, tmp_dir) print "Correct the myid in myid file for the new nodes in the zookeeper quorum" for zookeeper_node in database_nodes: zk_index = (database_nodes.index(zookeeper_node) + 1) with settings(host_string=zookeeper_node, password=get_env_passwords(zookeeper_node)): print "put cluster-unique zookeeper's instance id in myid" sudo('sudo echo "%s" > /var/lib/zookeeper/myid' % (zk_index)) execute('stop_zookeeper') print "Restart all the zookeeper nodes in the new quorum" for zookeeper_node in database_nodes: with settings(host_string=zookeeper_node, password=get_env_passwords(zookeeper_node)): put(tmp_dir+'/zoo.cfg', zoo_cfg, use_sudo=True) execute('restart_zookeeper') print "Make sure leader/folower election is complete" with settings(host_string=zookeeper_node, password=get_env_passwords(zookeeper_node)): retries = 3 while retries: zookeeper_status = verfiy_zookeeper(*database_nodes) if (len(database_nodes) > 1 and 'leader' in zookeeper_status.values() and 'follower' in zookeeper_status.values() and 'notrunning' not in zookeeper_status.values() and 'notinstalled' not in zookeeper_status.values() and 'standalone' not in zookeeper_status.values()): print zookeeper_status break elif (len(database_nodes) == 1 and 'notinstalled' not in zookeeper_status.values() and 'standalone' in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is already formed properly." break else: retries -= 1 if retries: continue print "Zookeepr leader/follower election has problems. Fix it and retry upgrade" print zookeeper_status exit(1)
def fixup_restart_haproxy_in_openstack_node(*args):
    keystone_server_lines = ''
    keystone_admin_server_lines = ''
    glance_server_lines = ''
    heat_server_lines = ''
    cinder_server_lines = ''
    ceph_restapi_server_lines = ''
    nova_api_server_lines = ''
    nova_meta_server_lines = ''
    nova_vnc_server_lines = ''
    memcached_server_lines = ''
    rabbitmq_server_lines = ''
    mysql_server_lines = ''
    barbican_server_lines = ''
    space = ' ' * 3
    for host_string in env.roledefs['openstack']:
        server_index = env.roledefs['openstack'].index(host_string) + 1
        mgmt_host_ip = hstr_to_ip(host_string)
        host_ip = hstr_to_ip(get_control_host_string(host_string))
        keystone_server_lines += \
            '%s server %s %s:6000 check inter 2000 rise 2 fall 1\n' \
            % (space, host_ip, host_ip)
        keystone_admin_server_lines += \
            '%s server %s %s:35358 check inter 2000 rise 2 fall 1\n' \
            % (space, host_ip, host_ip)
        glance_server_lines += \
            '%s server %s %s:9393 check inter 2000 rise 2 fall 1\n' \
            % (space, host_ip, host_ip)
        heat_server_lines += \
            '%s server %s %s:8005 check inter 2000 rise 2 fall 1\n' \
            % (space, host_ip, host_ip)
        cinder_server_lines += \
            '%s server %s %s:9776 check inter 2000 rise 2 fall 3\n' \
            % (space, host_ip, host_ip)
        ceph_restapi_server_lines += \
            '%s server %s %s:5006 check inter 2000 rise 2 fall 3\n' \
            % (space, host_ip, host_ip)
        nova_api_server_lines += \
            '%s server %s %s:9774 check inter 2000 rise 2 fall 1\n' \
            % (space, host_ip, host_ip)
        nova_meta_server_lines += \
            '%s server %s %s:9775 check inter 2000 rise 2 fall 1\n' \
            % (space, host_ip, host_ip)
        nova_vnc_server_lines += \
            '%s server %s %s:6999 check inter 2000 rise 2 fall 3\n' \
            % (space, mgmt_host_ip, mgmt_host_ip)
        barbican_server_lines += \
            '%s server %s %s:9322 check inter 2000 rise 2 fall 1\n' \
            % (space, host_ip, host_ip)
        if server_index <= 2:
            memcached_server_lines += \
                '%s server repcache%s %s:11211 check inter 2000 rise 2 fall 3\n' \
                % (space, server_index, host_ip)
        if server_index == 1:
            rabbitmq_server_lines += \
                '%s server rabbit%s %s:5672 weight 200 check inter 2000 rise 2 fall 3\n' \
                % (space, server_index, host_ip)
        else:
            rabbitmq_server_lines += \
                '%s server rabbit%s %s:5672 weight 100 check inter 2000 rise 2 fall 3 backup\n' \
                % (space, server_index, host_ip)
        if server_index == 1:
            mysql_server_lines += \
                '%s server mysql%s %s:3306 weight 200 check inter 2000 rise 2 fall 3\n' \
                % (space, server_index, host_ip)
        else:
            mysql_server_lines += \
                '%s server mysql%s %s:3306 weight 100 check inter 2000 rise 2 fall 3 backup\n' \
                % (space, server_index, host_ip)
    for host_string in env.roledefs['openstack']:
        haproxy_config = openstack_haproxy.template.safe_substitute({
            '__keystone_backend_servers__': keystone_server_lines,
            '__keystone_admin_backend_servers__': keystone_admin_server_lines,
            '__glance_backend_servers__': glance_server_lines,
            '__heat_backend_servers__': heat_server_lines,
            '__cinder_backend_servers__': cinder_server_lines,
            '__ceph_restapi_backend_servers__': ceph_restapi_server_lines,
            '__nova_api_backend_servers__': nova_api_server_lines,
            '__nova_meta_backend_servers__': nova_meta_server_lines,
            '__nova_vnc_backend_servers__': nova_vnc_server_lines,
            '__barbican_backend_servers__': barbican_server_lines,
            '__memcached_servers__': memcached_server_lines,
            '__rabbitmq_servers__': rabbitmq_server_lines,
            '__mysql_servers__': mysql_server_lines,
            '__contrail_hap_user__': 'haproxy',
            '__contrail_hap_passwd__': get_haproxy_token('openstack'),
        })
    for host_string in args:
        with settings(host_string=host_string):
            # chop old settings including pesky default from pkg...
            tmp_fname = "/tmp/haproxy-%s-config" % (host_string)
            get_as_sudo("/etc/haproxy/haproxy.cfg", tmp_fname)
            with settings(warn_only=True):
                local("sed -i -e '/^#contrail-openstack-marker-start/,/^#contrail-openstack-marker-end/d' %s" % (tmp_fname))
                local("sed -i -e 's/frontend\s*main\s*\*:5000/frontend main *:5001/' %s" % (tmp_fname))
                local("sed -i -e 's/*:5000/*:5001/' %s" % (tmp_fname))
                local("sed -i -e 's/ssl-relay 0.0.0.0:8443/ssl-relay 0.0.0.0:5002/' %s" % (tmp_fname))
                local("sed -i -e 's/option\shttplog/option tcplog/' %s" % (tmp_fname))
                local("sed -i -e 's/maxconn 4096/maxconn 100000/' %s" % (tmp_fname))
                local('sed -i "/^global/a\ spread-checks 4" %s' % tmp_fname)
                local('sed -i "/^global/a\ maxconn 10000" %s' % tmp_fname)
                local('grep -q "tune.bufsize 16384" %s || sed -i "/^global/a\\ tune.bufsize 16384" %s' % (tmp_fname, tmp_fname))
                local('grep -q "tune.maxrewrite 1024" %s || sed -i "/^global/a\\ tune.maxrewrite 1024" %s' % (tmp_fname, tmp_fname))
                local('grep -q "spread-checks 4" %s || sed -i "/^global/a\\ spread-checks 4" %s' % (tmp_fname, tmp_fname))
                local('grep -q "maxconn 10000" %s || sed -i "/^global/a\\ maxconn 10000" %s' % (tmp_fname, tmp_fname))
                # Remove default HA config
                local("sed -i '/listen\sappli1-rewrite/,/rspidel/d' %s" % tmp_fname)
                local("sed -i '/listen\sappli3-relais/,/rspidel/d' %s" % tmp_fname)
            # ...generate new ones
            cfg_file = open(tmp_fname, 'a')
            cfg_file.write(haproxy_config)
            cfg_file.close()
            put(tmp_fname, "/etc/haproxy/haproxy.cfg", use_sudo=True)
            local("rm %s" % (tmp_fname))
        # haproxy enable
        with settings(host_string=host_string, warn_only=True):
            sudo("chkconfig haproxy on")
            sudo("service supervisor-openstack stop")
            enable_haproxy()
            sudo("service haproxy restart")
            # Change the keystone admin/public port
            sudo("openstack-config --set /etc/keystone/keystone.conf DEFAULT public_port 6000")
            sudo("openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_port 35358")
def zookeeper_rolling_restart(): zoo_cfg = "/etc/zookeeper/conf/zoo.cfg" cfgm_nodes = copy.deepcopy(env.roledefs['cfgm']) database_nodes = copy.deepcopy(env.roledefs['database']) zookeeper_status = verfiy_zookeeper(*database_nodes) if (len(database_nodes) % 2) != 1: print "Recommended to run odd number of zookeeper(database) nodes." print "Add a new node to the existing clusters testbed,py and install contrail-install-packages in it.\n\ Installing/Provisioning will be done as part of Upgrade" exit(0) if cfgm_nodes == database_nodes: print "No need for rolling restart." if (len(database_nodes) > 1 and 'leader' in zookeeper_status.values() and 'follower' in zookeeper_status.values() and 'notrunning' not in zookeeper_status.values() and 'notinstalled' not in zookeeper_status.values() and 'standalone' not in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is already formed properly." return elif (len(database_nodes) == 1 and 'notinstalled' not in zookeeper_status.values() and 'standalone' in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is already formed properly." return execute('stop_cfgm') execute('backup_zookeeper_database') old_nodes = list(set(cfgm_nodes).difference(set(database_nodes))) new_nodes = list(set(database_nodes).difference(set(cfgm_nodes))) for new_node in new_nodes: zk_index = (database_nodes.index(new_node) + len(cfgm_nodes) + 1) with settings(host_string=new_node, password=env.passwords[new_node]): pdist = detect_ostype() print "Install zookeeper in the new node." execute('create_install_repo_node', new_node) remove_package(['supervisor'], pdist) upgrade_package(['python-contrail', 'contrail-openstack-database', 'zookeeper'], pdist) if pdist in ['ubuntu']: sudo("ln -sf /bin/true /sbin/chkconfig") sudo("chkconfig zookeeper on") print "Fix zookeeper configs" sudo("sudo sed 's/^#log4j.appender.ROLLINGFILE.MaxBackupIndex=/log4j.appender.ROLLINGFILE.MaxBackupIndex=/g' /etc/zookeeper/conf/log4j.properties > log4j.properties.new") sudo("sudo mv log4j.properties.new /etc/zookeeper/conf/log4j.properties") if pdist in ['centos']: sudo('echo export ZOO_LOG4J_PROP="INFO,CONSOLE,ROLLINGFILE" >> /usr/lib/zookeeper/bin/zkEnv.sh') if pdist in ['ubuntu']: sudo('echo ZOO_LOG4J_PROP="INFO,CONSOLE,ROLLINGFILE" >> /etc/zookeeper/conf/environment') print "put cluster-unique zookeeper's instance id in myid" sudo('sudo echo "%s" > /var/lib/zookeeper/myid' % (zk_index)) print "Add new nodes to existing zookeeper quorum" with settings(host_string=cfgm_nodes[0], password=env.passwords[cfgm_nodes[0]]): for new_node in new_nodes: zk_index = (database_nodes.index(new_node) + len(cfgm_nodes) + 1) sudo('echo "server.%d=%s:2888:3888" >> %s' % (zk_index, hstr_to_ip(new_node), zoo_cfg)) tmp_dir= tempfile.mkdtemp() get_as_sudo(zoo_cfg, tmp_dir) print "Restart zookeeper in all nodes to make new nodes join zookeeper quorum" for zookeeper_node in cfgm_nodes + new_nodes: with settings(host_string=zookeeper_node, password=env.passwords[zookeeper_node]): put(tmp_dir+'/zoo.cfg', zoo_cfg, use_sudo=True) print "Start Zookeeper in new database node" execute('restart_zookeeper') print "Waiting 5 seconds for the new nodes in the zookeeper quorum to be synced." 
sleep(5) print "Shutdown old nodes one by one and also make sure leader/follower election is complete after each shut downs" zoo_nodes = cfgm_nodes + database_nodes for old_node in old_nodes: zoo_nodes.remove(old_node) with settings(host_string=old_node, password=env.passwords[old_node]): print "Stop Zookeeper in old cfgm node" execute('stop_zookeeper') for zoo_node in zoo_nodes: with settings(host_string=zoo_node, password=env.passwords[zoo_node]): sudo("sed -i '/^server.*%s:2888:3888/d' %s" % (hstr_to_ip(zoo_node), zoo_cfg)) retries = 3 while retries: zookeeper_status = verfiy_zookeeper(*zoo_nodes) if (len(zoo_nodes) > 1 and 'leader' in zookeeper_status.values() and 'follower' in zookeeper_status.values() and 'notrunning' not in zookeeper_status.values() and 'notinstalled' not in zookeeper_status.values() and 'standalone' not in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is formed properly." break elif (len(zoo_nodes) == 1 and 'notinstalled' not in zookeeper_status.values() and 'standalone' in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is formed properly." break else: retries -= 1 if retries: for zoo_node in zoo_nodes: with settings(host_string=zoo_node, password=env.passwords[zoo_node]): execute('restart_zookeeper') continue print "Zookeeper quorum is not formed. Fix it and retry upgrade" print zookeeper_status exit(1) print "Correct the server id in zoo.cfg for the new nodes in the zookeeper quorum" with settings(host_string=database_nodes[0], password=env.passwords[database_nodes[0]]): sudo("sed -i '/^server.*3888/d' %s" % zoo_cfg) for zookeeper_node in database_nodes: zk_index = (database_nodes.index(zookeeper_node) + 1) sudo('echo "server.%d=%s:2888:3888" >> %s' % (zk_index, hstr_to_ip(zookeeper_node), zoo_cfg)) tmp_dir= tempfile.mkdtemp() get_as_sudo(zoo_cfg, tmp_dir) print "Correct the myid in myid file for the new nodes in the zookeeper quorum" for zookeeper_node in database_nodes: zk_index = (database_nodes.index(zookeeper_node) + 1) with settings(host_string=zookeeper_node, password=env.passwords[zookeeper_node]): print "put cluster-unique zookeeper's instance id in myid" sudo('sudo echo "%s" > /var/lib/zookeeper/myid' % (zk_index)) execute('stop_zookeeper') print "Restart all the zookeeper nodes in the new quorum" for zookeeper_node in database_nodes: with settings(host_string=zookeeper_node, password=env.passwords[zookeeper_node]): put(tmp_dir+'/zoo.cfg', zoo_cfg, use_sudo=True) execute('restart_zookeeper') print "Make sure leader/folower election is complete" with settings(host_string=zookeeper_node, password=env.passwords[zookeeper_node]): retries = 3 while retries: zookeeper_status = verfiy_zookeeper(*database_nodes) if (len(database_nodes) > 1 and 'leader' in zookeeper_status.values() and 'follower' in zookeeper_status.values() and 'notrunning' not in zookeeper_status.values() and 'notinstalled' not in zookeeper_status.values() and 'standalone' not in zookeeper_status.values()): print zookeeper_status break elif (len(database_nodes) == 1 and 'notinstalled' not in zookeeper_status.values() and 'standalone' in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is already formed properly." break else: retries -= 1 if retries: continue print "Zookeepr leader/follower election has problems. Fix it and retry upgrade" print zookeeper_status exit(1)
def setup_cmon_param_zkonupgrade():
    cmon_param = '/etc/contrail/ha/cmon_param'
    zoo_ip_list = [hstr_to_ip(get_control_host_string(cassandra_host))
                   for cassandra_host in env.roledefs['database']]
    zk_servers_ports = ','.join(['%s:2181' % (s) for s in zoo_ip_list])
    zks = 'ZK_SERVER_IP=("' + '" "'.join(zk_servers_ports) + '")'
    monitor_galera = "False"
    if get_contrail_internal_vip():
        monitor_galera = "True"
    # Assuming that keystone is the user and pass
    # if changed we need to fetch and update these fields
    keystone_db_user = "******"
    keystone_db_pass = "******"
    cmon_db_user = "******"
    cmon_db_pass = "******"
    sudo("grep -q 'ZK_SERVER_IP' %s || echo '%s' >> %s" % (cmon_param, zks, cmon_param))
    sudo("grep -q 'OS_KS_USER' %s || echo 'OS_KS_USER=%s' >> %s" % (cmon_param, keystone_db_user, cmon_param))
    sudo("grep -q 'OS_KS_PASS' %s || echo 'OS_KS_PASS=%s' >> %s" % (cmon_param, keystone_db_pass, cmon_param))
    sudo("grep -q 'CMON_USER' %s || echo 'CMON_USER=%s' >> %s" % (cmon_param, cmon_db_user, cmon_param))
    sudo("grep -q 'CMON_PASS' %s || echo 'CMON_PASS=%s' >> %s" % (cmon_param, cmon_db_pass, cmon_param))
    sudo("grep -q 'MONITOR_GALERA' %s || echo 'MONITOR_GALERA=%s' >> %s" % (cmon_param, monitor_galera, cmon_param))
    for host_string in env.roledefs['openstack']:
        haproxy_config = openstack_haproxy.template.safe_substitute({
            '__keystone_backend_servers__': keystone_server_lines,
            '__keystone_admin_backend_servers__': keystone_admin_server_lines,
            '__glance_backend_servers__': glance_server_lines,
            '__heat_backend_servers__': heat_server_lines,
            '__cinder_backend_servers__': cinder_server_lines,
            '__ceph_restapi_backend_servers__': ceph_restapi_server_lines,
            '__nova_api_backend_servers__': nova_api_server_lines,
            '__nova_meta_backend_servers__': nova_meta_server_lines,
            '__nova_vnc_backend_servers__': nova_vnc_server_lines,
            '__memcached_servers__': memcached_server_lines,
            '__rabbitmq_servers__': rabbitmq_server_lines,
            '__mysql_servers__': mysql_server_lines,
            '__contrail_hap_user__': 'haproxy',
            '__contrail_hap_passwd__': 'contrail123',
        })
    for host_string in args:
        with settings(host_string=host_string):
            # chop old settings including pesky default from pkg...
            tmp_fname = "/tmp/haproxy-%s-config" % (host_string)
            get_as_sudo("/etc/haproxy/haproxy.cfg", tmp_fname)
            with settings(warn_only=True):
                local("sed -i -e '/^#contrail-openstack-marker-start/,/^#contrail-openstack-marker-end/d' %s" % (tmp_fname))
                local("sed -i -e 's/frontend\s*main\s*\*:5000/frontend main *:5001/' %s" % (tmp_fname))
                local("sed -i -e 's/*:5000/*:5001/' %s" % (tmp_fname))
                local("sed -i -e 's/ssl-relay 0.0.0.0:8443/ssl-relay 0.0.0.0:5002/' %s" % (tmp_fname))
                local("sed -i -e 's/option\shttplog/option tcplog/' %s" % (tmp_fname))
                local("sed -i -e 's/maxconn 4096/maxconn 100000/' %s" % (tmp_fname))
                local('sed -i "/^global/a\ spread-checks 4" %s' % tmp_fname)
                local('sed -i "/^global/a\ maxconn 10000" %s' % tmp_fname)
                local('grep -q "tune.bufsize 16384" %s || sed -i "/^global/a\\ tune.bufsize 16384" %s' % (tmp_fname, tmp_fname))
                local('grep -q "tune.maxrewrite 1024" %s || sed -i "/^global/a\\ tune.maxrewrite 1024" %s' % (tmp_fname, tmp_fname))
                local('grep -q "spread-checks 4" %s || sed -i "/^global/a\\ spread-checks 4" %s' % (tmp_fname, tmp_fname))
                local('grep -q "maxconn 10000" %s || sed -i "/^global/a\\ maxconn 10000" %s' % (tmp_fname, tmp_fname))
                # Remove default HA config
                local("sed -i '/listen\sappli1-rewrite/,/rspidel/d' %s" % tmp_fname)
                local("sed -i '/listen\sappli3-relais/,/rspidel/d' %s" % tmp_fname)
            # ...generate new ones
            cfg_file = open(tmp_fname, 'a')
            cfg_file.write(haproxy_config)
            cfg_file.close()
            put(tmp_fname, "/etc/haproxy/haproxy.cfg", use_sudo=True)
            local("rm %s" % (tmp_fname))
        # haproxy enable
        with settings(host_string=host_string, warn_only=True):
            sudo("chkconfig haproxy on")
            sudo("service supervisor-openstack stop")
            enable_haproxy()
            sudo("service haproxy restart")
            # Change the keystone admin/public port
            sudo("openstack-config --set /etc/keystone/keystone.conf DEFAULT public_port 6000")
            sudo("openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_port 35358")
def fixup_restart_haproxy_in_openstack_node(*args):
    keystone_server_lines = ''
    keystone_admin_server_lines = ''
    glance_server_lines = ''
    cinder_server_lines = ''
    ceph_restapi_server_lines = ''
    nova_api_server_lines = ''
    nova_meta_server_lines = ''
    nova_vnc_server_lines = ''
    memcached_server_lines = ''
    rabbitmq_server_lines = ''
    mysql_server_lines = ''
    space = ' ' * 3
    for host_string in env.roledefs['openstack']:
        server_index = env.roledefs['openstack'].index(host_string) + 1
        mgmt_host_ip = hstr_to_ip(host_string)
        host_ip = hstr_to_ip(get_control_host_string(host_string))
        keystone_server_lines += \
            '%s server %s %s:6000 check inter 2000 rise 2 fall 1\n' \
            % (space, host_ip, host_ip)
        keystone_admin_server_lines += \
            '%s server %s %s:35358 check inter 2000 rise 2 fall 1\n' \
            % (space, host_ip, host_ip)
        glance_server_lines += \
            '%s server %s %s:9393 check inter 2000 rise 2 fall 1\n' \
            % (space, host_ip, host_ip)
        cinder_server_lines += \
            '%s server %s %s:9776 check inter 2000 rise 2 fall 3\n' \
            % (space, host_ip, host_ip)
        ceph_restapi_server_lines += \
            '%s server %s %s:5006 check inter 2000 rise 2 fall 3\n' \
            % (space, host_ip, host_ip)
        nova_api_server_lines += \
            '%s server %s %s:9774 check inter 2000 rise 2 fall 1\n' \
            % (space, host_ip, host_ip)
        nova_meta_server_lines += \
            '%s server %s %s:9775 check inter 2000 rise 2 fall 1\n' \
            % (space, host_ip, host_ip)
        nova_vnc_server_lines += \
            '%s server %s %s:6999 check inter 2000 rise 2 fall 3\n' \
            % (space, mgmt_host_ip, mgmt_host_ip)
        if server_index <= 2:
            memcached_server_lines += \
                '%s server repcache%s %s:11211 check inter 2000 rise 2 fall 3\n' \
                % (space, server_index, host_ip)
        if server_index == 1:
            rabbitmq_server_lines += \
                '%s server rabbit%s %s:5672 weight 200 check inter 2000 rise 2 fall 3\n' \
                % (space, server_index, host_ip)
        else:
            rabbitmq_server_lines += \
                '%s server rabbit%s %s:5672 weight 100 check inter 2000 rise 2 fall 3 backup\n' \
                % (space, server_index, host_ip)
        if server_index == 1:
            mysql_server_lines += \
                '%s server mysql%s %s:3306 weight 200 check inter 2000 rise 2 fall 3\n' \
                % (space, server_index, host_ip)
        else:
            mysql_server_lines += \
                '%s server mysql%s %s:3306 weight 100 check inter 2000 rise 2 fall 3 backup\n' \
                % (space, server_index, host_ip)
    for host_string in env.roledefs['openstack']:
        haproxy_config = openstack_haproxy.template.safe_substitute({
            '__keystone_backend_servers__': keystone_server_lines,
            '__keystone_admin_backend_servers__': keystone_admin_server_lines,
            '__glance_backend_servers__': glance_server_lines,
            '__cinder_backend_servers__': cinder_server_lines,
            '__ceph_restapi_backend_servers__': ceph_restapi_server_lines,
            '__nova_api_backend_servers__': nova_api_server_lines,
            '__nova_meta_backend_servers__': nova_meta_server_lines,
            '__nova_vnc_backend_servers__': nova_vnc_server_lines,
            '__memcached_servers__': memcached_server_lines,
            '__rabbitmq_servers__': rabbitmq_server_lines,
            '__mysql_servers__': mysql_server_lines,
            '__contrail_hap_user__': 'haproxy',
            '__contrail_hap_passwd__': 'contrail123',
        })
    for host_string in args:
        with settings(host_string=host_string):
            # chop old settings including pesky default from pkg...
            tmp_fname = "/tmp/haproxy-%s-config" % (host_string)
            get_as_sudo("/etc/haproxy/haproxy.cfg", tmp_fname)
            with settings(warn_only=True):
                local("sed -i -e '/^#contrail-openstack-marker-start/,/^#contrail-openstack-marker-end/d' %s" % (tmp_fname))
                local("sed -i -e 's/frontend\s*main\s*\*:5000/frontend main *:5001/' %s" % (tmp_fname))
                local("sed -i -e 's/*:5000/*:5001/' %s" % (tmp_fname))
                local("sed -i -e 's/ssl-relay 0.0.0.0:8443/ssl-relay 0.0.0.0:5002/' %s" % (tmp_fname))
                local("sed -i -e 's/option\shttplog/option tcplog/' %s" % (tmp_fname))
                local("sed -i -e 's/maxconn 4096/maxconn 100000/' %s" % (tmp_fname))
                local('sed -i "/^global/a\\ tune.bufsize 16384" %s' % tmp_fname)
                local('sed -i "/^global/a\\ tune.maxrewrite 1024" %s' % tmp_fname)
                local('sed -i "/^global/a\ spread-checks 4" %s' % tmp_fname)
                # Remove default HA config
                local("sed -i '/listen\sappli1-rewrite/,/rspidel/d' %s" % tmp_fname)
                local("sed -i '/listen\sappli3-relais/,/rspidel/d' %s" % tmp_fname)
            # ...generate new ones
            cfg_file = open(tmp_fname, 'a')
            cfg_file.write(haproxy_config)
            cfg_file.close()
            put(tmp_fname, "/etc/haproxy/haproxy.cfg", use_sudo=True)
            local("rm %s" % (tmp_fname))
        # haproxy enable
        with settings(host_string=host_string, warn_only=True):
            sudo("chkconfig haproxy on")
            sudo("service supervisor-openstack stop")
            enable_haproxy()
            sudo("service haproxy restart")
            # Change the keystone admin/public port
            sudo("openstack-config --set /etc/keystone/keystone.conf DEFAULT public_port 6000")
            sudo("openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_port 35358")