def setup_keepalived(): """Task to provision VIP for openstack nodes with keepalived""" mgmt_ip = hstr_to_ip(env.host_string) self_host = get_control_host_string(env.host_string) self_ip = hstr_to_ip(self_host) openstack_host_password = env.passwords[env.host_string] if (getattr(env, 'openstack_admin_password', None)): openstack_admin_password = env.openstack_admin_password else: openstack_admin_password = '******' internal_vip = get_from_testbed_dict('ha', 'internal_vip', None) external_vip = get_from_testbed_dict('ha', 'external_vip', None) openstack_host_list = [get_control_host_string(openstack_host)\ for openstack_host in env.roledefs['openstack']] myindex = openstack_host_list.index(self_host) if myindex >= 1: # Wait for VIP to be assiciated to MASTER with settings(host_string=env.roledefs['openstack'][0], warn_only=True): while run("ip addr | grep %s" % internal_vip).failed: sleep(2) print "Waiting for VIP to be associated to MASTER VRRP." continue with cd(INSTALLER_DIR): cmd = "PASSWORD=%s ADMIN_TOKEN=%s python setup-vnc-keepalived.py\ --self_ip %s --internal_vip %s --mgmt_self_ip %s\ --openstack_index %d --num_nodes %d" % (openstack_host_password, openstack_admin_password, self_ip, internal_vip, mgmt_ip, (openstack_host_list.index(self_host) + 1), len(env.roledefs['openstack'])) if external_vip: cmd += ' --external_vip %s' % external_vip run(cmd)
def remove_node_from_galera(del_galera_node):
    """Task to remove a node from the galera cluster.
       Removes config from other Galera nodes
    """
    # Galera needs quorum; refuse to shrink below three members.
    if len(env.roledefs['openstack']) < 3:
        raise RuntimeError("Galera cluster needs of quorum of at least 3 nodes! Cannot remove the node from cluster")
    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)
    members = [get_control_host_string(h) for h in env.roledefs['openstack']]
    galera_ips = [hstr_to_ip(m) for m in members]
    authserver_ip = get_authserver_ip()
    internal_vip = get_openstack_internal_vip()
    external_vip = get_openstack_external_vip()
    zoo_ips = [hstr_to_ip(get_control_host_string(db))
               for db in env.roledefs['database']]
    with cd(INSTALLER_DIR):
        cmd = ("remove-galera-node"
               " --self_ip %s --node_to_del %s --keystone_ip %s"
               " --galera_ip_list %s --internal_vip %s"
               " --openstack_index %d --zoo_ip_list %s"
               % (self_ip, del_galera_node, authserver_ip,
                  ' '.join(galera_ips), internal_vip,
                  members.index(self_host) + 1, ' '.join(zoo_ips)))
        if external_vip:
            cmd += ' --external_vip %s' % external_vip
        sudo(cmd)
def setup_keepalived_node(role): """Task to provision VIP for node with keepalived""" mgmt_ip = hstr_to_ip(env.host_string) self_host = get_control_host_string(env.host_string) self_ip = hstr_to_ip(self_host) internal_vip = get_openstack_internal_vip() external_vip = get_openstack_external_vip() if role == 'cfgm': internal_vip = get_contrail_internal_vip() external_vip = get_contrail_external_vip() keepalived_host_list = [get_control_host_string(keepalived_host)\ for keepalived_host in env.roledefs[role]] myindex = keepalived_host_list.index(self_host) if myindex >= 1: # Wait for VIP to be assiciated to MASTER with settings(host_string=env.roledefs[role][0], warn_only=True): while sudo("ip addr | grep %s" % internal_vip).failed: sleep(2) print "Waiting for VIP to be associated to MASTER VRRP." continue with cd(INSTALLER_DIR): cmd = "setup-vnc-keepalived\ --self_ip %s --internal_vip %s --mgmt_self_ip %s\ --self_index %d --num_nodes %d --role %s" % ( self_ip, internal_vip, mgmt_ip, (keepalived_host_list.index(self_host) + 1), len( env.roledefs[role]), role) if external_vip: cmd += ' --external_vip %s' % external_vip sudo(cmd)
def setup_keepalived():
    """Task to provision VIP for openstack nodes with keepalived"""
    mgmt_ip = hstr_to_ip(env.host_string)
    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)
    openstack_host_password = env.passwords[env.host_string]
    # Placeholder admin token unless one is configured on env.
    openstack_admin_password = getattr(env, 'openstack_admin_password', None)
    if not openstack_admin_password:
        openstack_admin_password = '******'
    internal_vip = get_from_testbed_dict('ha', 'internal_vip', None)
    external_vip = get_from_testbed_dict('ha', 'external_vip', None)
    hosts = [get_control_host_string(h) for h in env.roledefs['openstack']]
    with cd(INSTALLER_DIR):
        cmd = ("PASSWORD=%s ADMIN_TOKEN=%s python setup-vnc-keepalived.py"
               " --self_ip %s --internal_vip %s --mgmt_self_ip %s"
               " --openstack_index %d"
               % (openstack_host_password, openstack_admin_password,
                  self_ip, internal_vip, mgmt_ip,
                  hosts.index(self_host) + 1))
        if external_vip:
            cmd += ' --external_vip %s' % external_vip
        run(cmd)
def setup_galera_cluster(): """Task to cluster the openstack nodes with galera""" if len(env.roledefs['openstack']) <= 1: print "Single Openstack cluster, skipping galera cluster setup." return if env.roledefs['openstack'].index(env.host_string) == 0: execute('setup_passwordless_ssh', *env.roledefs['openstack']) self_host = get_control_host_string(env.host_string) self_ip = hstr_to_ip(self_host) openstack_host_password = env.passwords[env.host_string] if (getattr(env, 'openstack_admin_password', None)): openstack_admin_password = env.openstack_admin_password else: openstack_admin_password = '******' openstack_host_list = [get_control_host_string(openstack_host)\ for openstack_host in env.roledefs['openstack']] galera_ip_list = [hstr_to_ip(galera_host)\ for galera_host in openstack_host_list] keystone_ip = get_keystone_ip() internal_vip = get_openstack_internal_vip() with cd(INSTALLER_DIR): run("PASSWORD=%s ADMIN_TOKEN=%s python setup-vnc-galera.py\ --self_ip %s --keystone_ip %s --galera_ip_list %s\ --internal_vip %s --openstack_index %d" % (openstack_host_password, openstack_admin_password, self_ip, keystone_ip, ' '.join(galera_ip_list), internal_vip, (openstack_host_list.index(self_host) + 1)))
def setup_keepalived():
    """Task to provision VIP for openstack nodes with keepalived"""
    mgmt_ip = hstr_to_ip(env.host_string)
    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)
    node_password = env.passwords[env.host_string]
    # Use a placeholder token unless an admin password is configured.
    admin_token = getattr(env, 'openstack_admin_password', None) or '******'
    internal_vip = get_from_testbed_dict('ha', 'internal_vip', None)
    external_vip = get_from_testbed_dict('ha', 'external_vip', None)
    ctrl_hosts = [get_control_host_string(h)
                  for h in env.roledefs['openstack']]
    index = ctrl_hosts.index(self_host) + 1
    with cd(INSTALLER_DIR):
        parts = [
            "PASSWORD=%s ADMIN_TOKEN=%s python setup-vnc-keepalived.py"
            % (node_password, admin_token),
            "--self_ip %s" % self_ip,
            "--internal_vip %s" % internal_vip,
            "--mgmt_self_ip %s" % mgmt_ip,
            "--openstack_index %d" % index,
        ]
        if external_vip:
            parts.append("--external_vip %s" % external_vip)
        run(' '.join(parts))
def setup_keystone_ssl_certs_node(*nodes): default_certfile = '/etc/keystone/ssl/certs/keystone.pem' default_keyfile = '/etc/keystone/ssl/private/keystone.key' default_cafile = '/etc/keystone/ssl/certs/keystone_ca.pem' keystonecertbundle = get_keystone_cert_bundle() ssl_certs = ((get_keystone_certfile(), default_certfile), (get_keystone_keyfile(), default_keyfile), (get_keystone_cafile(), default_cafile)) index = env.roledefs['openstack'].index(env.host_string) + 1 for node in nodes: with settings(host_string=node, password=get_env_passwords(node)): for ssl_cert, default in ssl_certs: if ssl_cert == default: # Clear old certificate sudo('rm -f %s' % ssl_cert) sudo('rm -f %s' % keystonecertbundle) for ssl_cert, default in ssl_certs: if ssl_cert == default: openstack_host = env.roledefs['openstack'][0] if index == 1: if not exists(ssl_cert, use_sudo=True): print "Creating keystone SSL certs in first openstack node" subject_alt_names_mgmt = [hstr_to_ip(host) for host in env.roledefs['openstack']] subject_alt_names_ctrl = [hstr_to_ip(get_control_host_string(host)) for host in env.roledefs['openstack']] subject_alt_names = subject_alt_names_mgmt + subject_alt_names_ctrl if get_openstack_external_vip(): subject_alt_names.append(get_openstack_external_vip()) sudo('create-keystone-ssl-certs.sh %s %s' % ( get_openstack_internal_vip() or hstr_to_ip(get_control_host_string(openstack_host)), ','.join(subject_alt_names))) else: with settings(host_string=openstack_host, password=get_env_passwords(openstack_host)): while not exists(ssl_cert, use_sudo=True): print "Wait for SSL certs to be created in first openstack" sleep(0.1) print "Get SSL cert(%s) from first openstack" % ssl_cert tmp_dir= tempfile.mkdtemp() tmp_fname = os.path.join(tmp_dir, os.path.basename(ssl_cert)) get_as_sudo(ssl_cert, tmp_fname) print "Copy to this(%s) openstack node" % env.host_string sudo('mkdir -p /etc/keystone/ssl/certs/') sudo('mkdir -p /etc/keystone/ssl/private/') put(tmp_fname, ssl_cert, 
use_sudo=True) os.remove(tmp_fname) elif os.path.isfile(ssl_cert): print "Certificate (%s) exists locally" % ssl_cert put(ssl_cert, default, use_sudo=True) elif exists(ssl_cert, use_sudo=True): print "Certificate (%s) exists in openstack node" % ssl_cert pass else: raise RuntimeError("%s doesn't exists locally or in openstack node") if not exists(keystonecertbundle, use_sudo=True): ((certfile, _), (keyfile, _), (cafile, _)) = ssl_certs sudo('cat %s %s > %s' % (certfile, cafile, keystonecertbundle)) sudo("chown -R keystone:keystone /etc/keystone/ssl")
def setup_galera_cluster(): """Task to cluster the openstack nodes with galera""" if len(env.roledefs['openstack']) <= 1: print "Single Openstack cluster, skipping galera cluster setup." return if env.roledefs['openstack'].index(env.host_string) == 0: execute('setup_passwordless_ssh', *env.roledefs['openstack']) self_host = get_control_host_string(env.host_string) self_ip = hstr_to_ip(self_host) openstack_host_password = env.passwords[env.host_string] if (getattr(env, 'openstack_admin_password', None)): openstack_admin_password = env.openstack_admin_password else: openstack_admin_password = '******' openstack_host_list = [get_control_host_string(openstack_host)\ for openstack_host in env.roledefs['openstack']] galera_ip_list = [hstr_to_ip(galera_host)\ for galera_host in openstack_host_list] keystone_ip = get_keystone_ip() internal_vip = get_from_testbed_dict('ha', 'internal_vip', None) with cd(INSTALLER_DIR): run("PASSWORD=%s ADMIN_TOKEN=%s python setup-vnc-galera.py\ --self_ip %s --keystone_ip %s --galera_ip_list %s\ --internal_vip %s --openstack_index %d" % (openstack_host_password, openstack_admin_password, self_ip, keystone_ip, ' '.join(galera_ip_list), internal_vip, (openstack_host_list.index(self_host) + 1)))
def setup_keepalived_node(role): """Task to provision VIP for node with keepalived""" mgmt_ip = hstr_to_ip(env.host_string) self_host = get_control_host_string(env.host_string) self_ip = hstr_to_ip(self_host) internal_vip = get_openstack_internal_vip() external_vip = get_openstack_external_vip() if role == 'cfgm': internal_vip = get_contrail_internal_vip() external_vip = get_contrail_external_vip() keepalived_host_list = [get_control_host_string(keepalived_host)\ for keepalived_host in env.roledefs[role]] myindex = keepalived_host_list.index(self_host) if myindex >= 1: # Wait for VIP to be assiciated to MASTER with settings(host_string=env.roledefs[role][0], warn_only=True): while sudo("ip addr | grep %s" % internal_vip).failed: sleep(2) print "Waiting for VIP to be associated to MASTER VRRP." continue with cd(INSTALLER_DIR): cmd = "setup-vnc-keepalived\ --self_ip %s --internal_vip %s --mgmt_self_ip %s\ --self_index %d --num_nodes %d --role %s" % ( self_ip, internal_vip, mgmt_ip, (keepalived_host_list.index(self_host) + 1), len(env.roledefs[role]), role) if external_vip: cmd += ' --external_vip %s' % external_vip sudo(cmd)
def setup_galera_cluster(): """Task to cluster the openstack nodes with galera""" if len(env.roledefs["openstack"]) <= 1: print "Single Openstack cluster, skipping galera cluster setup." return if env.roledefs["openstack"].index(env.host_string) == 0: execute("setup_passwordless_ssh", *env.roledefs["openstack"]) self_host = get_control_host_string(env.host_string) self_ip = hstr_to_ip(self_host) openstack_host_list = [get_control_host_string(openstack_host) for openstack_host in env.roledefs["openstack"]] galera_ip_list = [hstr_to_ip(galera_host) for galera_host in openstack_host_list] authserver_ip = get_authserver_ip() internal_vip = get_openstack_internal_vip() external_vip = get_openstack_external_vip() zoo_ip_list = [hstr_to_ip(get_control_host_string(cassandra_host)) for cassandra_host in env.roledefs["database"]] monitor_galera = "False" if get_openstack_internal_vip(): monitor_galera = "True" cmon_db_user = "******" cmon_db_pass = "******" keystone_db_user = "******" keystone_db_pass = "******" with cd(INSTALLER_DIR): cmd = ( "setup-vnc-galera\ --self_ip %s --keystone_ip %s --galera_ip_list %s\ --internal_vip %s --openstack_index %d --zoo_ip_list %s --keystone_user %s\ --keystone_pass %s --cmon_user %s --cmon_pass %s --monitor_galera %s" % ( self_ip, keystone_ip, " ".join(galera_ip_list), internal_vip, (openstack_host_list.index(self_host) + 1), " ".join(zoo_ip_list), keystone_db_user, keystone_db_pass, cmon_db_user, cmon_db_pass, monitor_galera, ) ) if external_vip: cmd += " --external_vip %s" % external_vip sudo(cmd)
def config_rabbitmq(): rabbit_hosts = [] rabbit_conf = '/etc/rabbitmq/rabbitmq.config' if len(env.roledefs['rabbit']) <= 1 and detect_ostype() == 'redhat': print "CONFIG_RABBITMQ: Skip creating rabbitmq.config for Single node setup" return for host_string in env.roledefs['rabbit']: with settings(host_string=host_string, password=env.passwords[host_string]): host_name = sudo('hostname -s') + ctrl rabbit_hosts.append("\'rabbit@%s\'" % host_name) rabbit_hosts = ', '.join(rabbit_hosts) rabbitmq_config_template = rabbitmq_config if len(env.roledefs['rabbit']) == 1: rabbitmq_config_template = rabbitmq_config_single_node rabbitmq_configs = rabbitmq_config_template.template.safe_substitute({ '__control_intf_ip__': hstr_to_ip(get_control_host_string(env.host_string)), '__rabbit_hosts__': rabbit_hosts, }) tmp_fname = "/tmp/rabbitmq_%s.config" % env.host_string cfg_file = open(tmp_fname, 'w') cfg_file.write(rabbitmq_configs) cfg_file.close() put(tmp_fname, "/etc/rabbitmq/rabbitmq.config", use_sudo=True) local("rm %s" % (tmp_fname))
def purge_node_from_openstack_cluster(del_openstack_node):
    """Remove an openstack node from the HA cluster: stop its cmon,
    refresh haproxy/galera state on the survivors, unregister its
    services and finally stop its openstack daemons."""
    if ping_test(del_openstack_node):
        # If CMON is running on the node being purged, stop it and
        # invalidate its config.
        with settings(host_string=del_openstack_node, warn_only=True):
            for command in ("service contrail-hamon stop",
                            "service cmon stop",
                            "chkconfig contrail-hamon off",
                            "mv /etc/cmon.cnf /etc/cmon.cnf.removed"):
                sudo(command)
    del_openstack_node_ip = hstr_to_ip(del_openstack_node)
    del_openstack_ctrl_ip = hstr_to_ip(
        get_control_host_string(del_openstack_node))
    execute('fixup_restart_haproxy_in_openstack')
    execute("restart_openstack")
    execute('remove_node_from_galera', del_openstack_node_ip)
    execute('fix_cmon_param_and_add_keys_to_compute')
    # Unregister the departing node's services via the first survivor.
    with settings(host_string=env.roledefs['openstack'][0]):
        sudo("unregister-openstack-services --node_to_unregister %s"
             % del_openstack_ctrl_ip)
    if ping_test(del_openstack_node):
        with settings(host_string=del_openstack_node, warn_only=True):
            for command in ("service mysql stop",
                            "service supervisor-openstack stop",
                            "chkconfig supervisor-openstack off"):
                sudo(command)
def purge_node_from_database(del_db_node): del_db_ctrl_ip = hstr_to_ip(get_control_host_string(del_db_node)) with settings(host_string = env.roledefs['database'][0], warn_only = True): is_part_of_db = local('nodetool status | grep %s' % del_db_ctrl_ip).succeeded if not is_part_of_db: print "Node %s is not part of DB Cluster", del_db_node return is_seed = local('grep "\- seeds: " /etc/cassandra/cassandra.yaml | grep %s' % del_db_ctrl_ip).succeeded is_alive = local('nodetool status | grep %s | grep "UN"' % del_db_ctrl_ip).succeeded if is_seed: # If the node to be removed is a seed node, then we need to re-establish other nodes # as seed node before removing this node. print "Removing the seed node %s from DB Cluster and re-electing new seed nodes", del_db_ctrl_ip for db in env.roledefs['database']: with settings(host_string = db): cmd = frame_vnc_database_cmd(db, cmd='readjust-cassandra-seed-list') sudo(cmd) if is_alive and ping_test(del_db_node): # Node is active in the cluster. The tokens need to be redistributed before the # node can be brought down. with settings(host_string=del_db_node): cmd = frame_vnc_database_cmd(del_db_node, cmd='decommission-cassandra-node') sudo(cmd) else: # Node is part of the cluster but not active. Hence, remove the node # from the cluster with settings(host_string=env.roledefs['database'][0]): cmd = frame_vnc_database_cmd(del_db_node, cmd='remove-cassandra-node') sudo(cmd)
def setup_cmon_param_zkonupgrade_node(*args):
    """Append zookeeper and cmon monitoring parameters to
    /etc/contrail/ha/cmon_param.

    Each key is appended only when not already present (grep || echo),
    so repeated runs are idempotent.
    """
    if len(env.roledefs['openstack']) <= 1:
        print "Single Openstack cluster, skipping cmon zookeeper setup."
        return
    # NOTE(review): host_string is never used inside the loop and no
    # settings(host_string=...) context is opened, so every iteration
    # runs against the current env.host_string -- confirm whether a
    # per-host context was intended here.
    for host_string in args:
        cmon_param = '/etc/contrail/ha/cmon_param'
        # Zookeeper ensemble: control-interface IP of each database node.
        zoo_ip_list = [hstr_to_ip(get_control_host_string(\
            cassandra_host)) for cassandra_host in env.roledefs['database']]
        zk_servers_ports = ','.join(['%s:2181' %(s) for s in zoo_ip_list])
        zks = 'ZK_SERVER_IP="%s"' % (zk_servers_ports)
        # Galera monitoring is only enabled for HA (VIP) deployments.
        monitor_galera="False"
        if get_contrail_internal_vip():
            monitor_galera="True"
        # Assuming that keystone is the user and pass
        # if changed we need to fetch and update these fields
        keystone_db_user="******"
        keystone_db_pass="******"
        cmon_db_user="******"
        cmon_db_pass="******"
        # "grep -q KEY file || echo ... >> file" appends only if missing.
        sudo("grep -q 'ZK_SERVER_IP' %s || echo '%s' >> %s" % (cmon_param, zks, cmon_param))
        sudo("grep -q 'OS_KS_USER' %s || echo 'OS_KS_USER=%s' >> %s" % (cmon_param, keystone_db_user, cmon_param))
        sudo("grep -q 'OS_KS_PASS' %s || echo 'OS_KS_PASS=%s' >> %s" % (cmon_param, keystone_db_pass, cmon_param))
        sudo("grep -q 'CMON_USER' %s || echo 'CMON_USER=%s' >> %s" % (cmon_param, cmon_db_user, cmon_param))
        sudo("grep -q 'CMON_PASS' %s || echo 'CMON_PASS=%s' >> %s" % (cmon_param, cmon_db_pass, cmon_param))
        sudo("grep -q 'MONITOR_GALERA' %s || echo 'MONITOR_GALERA=%s' >> %s" % (cmon_param, monitor_galera, cmon_param))
def fix_restart_xinetd_conf_node(*args):
    """Fix contrail-mysqlprobe to accept connection only from this node,
    USAGE:fab fix_restart_xinetd_conf_node:[email protected],[email protected]"""
    for host_string in args:
        node_ip = hstr_to_ip(get_control_host_string(host_string))
        # Restrict the probe to localhost and this node's control IP.
        run("sed -i -e 's#only_from = 0.0.0.0/0#only_from = %s 127.0.0.1#' /etc/xinetd.d/contrail-mysqlprobe" % node_ip)
        # Apply the change and persist xinetd across reboots.
        run("service xinetd restart")
        run("chkconfig xinetd on")
def verfiy_and_update_hosts(host_name, host_string):
    """Ensure /etc/hosts maps the control-interface IP to the hostname
    and its '<host>ctrl' alias.

    The alias is required so the erlang node clusters over the same
    interface that the rabbitMQ TCP listener uses.
    """
    alias = host_name + ctrl
    with settings(hide('stderr'), warn_only=True):
        # Only append when the alias is not present yet.
        if sudo('grep %s /etc/hosts' % alias).failed:
            ctrl_ip = hstr_to_ip(get_control_host_string(host_string))
            sudo("echo '%s %s %s' >> /etc/hosts"
                 % (ctrl_ip, host_name, alias))
def detach_vrouter_node(*args):
    """Detaches one/more compute node from the existing cluster.

    Fix: ``settings(pasword=...)`` was a typo, so the cfgm password was
    never actually applied to the session; it is now passed as
    ``password``.
    """
    cfgm_host = get_control_host_string(env.roledefs['cfgm'][0])
    cfgm_host_password = get_env_passwords(env.roledefs['cfgm'][0])
    cfgm_ip = hstr_to_ip(cfgm_host)
    nova_compute = "openstack-nova-compute"
    for host_string in args:
        with settings(host_string=host_string, warn_only=True):
            sudo("service supervisor-vrouter stop")
            if detect_ostype() in ['ubuntu']:
                nova_compute = "nova-compute"
            # vcenter-mode computes run no nova-compute service at all.
            mode = get_mode(host_string)
            if (mode == 'vcenter'):
                nova_compute = ""
            if (nova_compute != ""):
                sudo("service %s stop" % nova_compute)
            compute_hostname = sudo("hostname")
        # Unprovision the vrouter from the config API via the first cfgm.
        with settings(host_string=env.roledefs['cfgm'][0],
                      password=cfgm_host_password):
            sudo("python /opt/contrail/utils/provision_vrouter.py --host_name %s --host_ip %s --api_server_ip %s --oper del %s"
                 % (compute_hostname, host_string.split('@')[1], cfgm_ip,
                    get_mt_opts()))
    execute("restart_control")
def drop_analytics_keyspace_node(*args):
    """Drop the analytics keyspaces (ContrailAnalytics and
    ContrailAnalyticsCql) from cassandra on each node in *args*.

    Commands are staged in /tmp/cassandra_commands_file and fed to cqlsh;
    warn_only tolerates failures (e.g. an absent keyspace).
    """
    for host_string in args:
        with settings(host_string=host_string, warn_only=True):
            # cqlsh targets this node's control-interface address.
            CASSANDRA_CMD = 'cqlsh %s -f ' % hstr_to_ip(
                get_control_host_string(host_string))
            print "Dropping analytics keyspace.. this may take a while.."
            sudo(
                "echo 'describe keyspace \"ContrailAnalytics\";' > /tmp/cassandra_commands_file"
            )
            # Only attempt the drop when describe succeeds, i.e. the
            # keyspace actually exists.
            if sudo(CASSANDRA_CMD + '/tmp/cassandra_commands_file').succeeded:
                sudo(
                    "echo 'drop keyspace \"ContrailAnalytics\";' > /tmp/cassandra_commands_file"
                )
                if not sudo(CASSANDRA_CMD + '/tmp/cassandra_commands_file').succeeded:
                    print "WARN: Drop keyspace ContrailAnalytics failed.."
                else:
                    print "INFO: keyspace ContrailAnalytics is dropped.."
                    print "INFO: if snapshots are created, manual deletion may be required to free up disk.."
            # ContrailAnalyticsCql is dropped without a describe check.
            sudo(
                "echo 'drop keyspace \"ContrailAnalyticsCql\";' > /tmp/cassandra_commands_file"
            )
            if not sudo(CASSANDRA_CMD + '/tmp/cassandra_commands_file').succeeded:
                print "WARN: Drop keyspace ContrailAnalyticsCql failed.."
            else:
                print "INFO: keyspace ContrailAnalyticsCql is dropped.."
                print "INFO: if snapshots are created, manual deletion may be required to free up disk.."
def fix_restart_xinetd_conf_node(*args):
    """Fix contrail-mysqlprobe to accept connection only from this node,
    USAGE:fab fix_restart_xinetd_conf_node:[email protected],[email protected]"""
    sed_fmt = ("sed -i -e 's#only_from = 0.0.0.0/0#only_from = %s 127.0.0.1#'"
               " /etc/xinetd.d/contrail-mysqlprobe")
    for host_string in args:
        ctrl_ip = hstr_to_ip(get_control_host_string(host_string))
        # Limit the probe to localhost plus this node's control IP,
        # then apply and persist the xinetd change.
        sudo(sed_fmt % ctrl_ip)
        sudo("service xinetd restart")
        sudo("chkconfig xinetd on")
def fixup_restart_haproxy_in_collector_node(*args):
    """Regenerate the collector section of haproxy.cfg on each node in
    *args* and restart haproxy there.

    Improvements: dropped the unused server_index/mgmt_host_ip locals
    and the loop that recomputed the identical template substitution
    once per collector node -- the snippet is now rendered a single time.
    """
    space = ' ' * 3
    contrail_analytics_api_server_lines = ''
    # One backend line per collector, keyed on its control-interface IP.
    for host_string in env.roledefs['collector']:
        host_ip = hstr_to_ip(get_control_host_string(host_string))
        contrail_analytics_api_server_lines +=\
            '%s server %s %s:9081 check inter 2000 rise 2 fall 3\n'\
            % (space, host_ip, host_ip)
    # Render once; the result is identical for every target node.
    haproxy_config = collector_haproxy.template.safe_substitute({
        '__contrail_analytics_api_backend_servers__':
            contrail_analytics_api_server_lines,
        '__contrail_hap_user__': 'haproxy',
        '__contrail_hap_passwd__': 'contrail123',
    })
    for host_string in args:
        with settings(host_string=host_string):
            # chop old settings including pesky default from pkg...
            tmp_fname = "/tmp/haproxy-%s-config" % (host_string)
            get_as_sudo("/etc/haproxy/haproxy.cfg", tmp_fname)
            with settings(warn_only=True):
                local(
                    "sed -i -e '/^#contrail-collector-marker-start/,/^#contrail-collector-marker-end/d' %s"
                    % (tmp_fname))
                local(
                    "sed -i -e 's/frontend\s*main\s*\*:5000/frontend main *:5001/' %s"
                    % (tmp_fname))
                local(
                    "sed -i -e 's/ssl-relay 0.0.0.0:8443/ssl-relay 0.0.0.0:5002/' %s"
                    % (tmp_fname))
                local(
                    "sed -i -e 's/option\shttplog/option tcplog/' %s"
                    % (tmp_fname))
                local("sed -i -e 's/maxconn 4096/maxconn 100000/' %s"
                      % (tmp_fname))
                # Remove default HA config
                local("sed -i '/listen\sappli1-rewrite/,/rspidel/d' %s" % tmp_fname)
                local("sed -i '/listen\sappli3-relais/,/rspidel/d' %s" % tmp_fname)
            # ...append the freshly generated section
            cfg_file = open(tmp_fname, 'a')
            cfg_file.write(haproxy_config)
            cfg_file.close()
            put(tmp_fname, "/etc/haproxy/haproxy.cfg", use_sudo=True)
            local("rm %s" % (tmp_fname))
        # haproxy enable
        with settings(host_string=host_string, warn_only=True):
            sudo("chkconfig haproxy on")
            enable_haproxy()
            sudo("service haproxy restart")
def fix_wsrep_cluster_address():
    """Point wsrep_cluster_address on the first openstack node at the
    full galera member list (control-interface IPs, port 4567)."""
    members = [get_control_host_string(h)
               for h in env.roledefs['openstack']]
    galera_ips = [hstr_to_ip(m) for m in members]
    first_node = env.roledefs['openstack'][0]
    with settings(host_string=first_node,
                  password=get_env_passwords(first_node)):
        # Ubuntu keeps the wsrep settings in a dedicated conf.d file.
        if detect_ostype() in ['ubuntu']:
            wsrep_conf = '/etc/mysql/conf.d/wsrep.cnf'
        else:
            wsrep_conf = '/etc/mysql/my.cnf'
        sudo('sed -ibak "s#wsrep_cluster_address=.*#wsrep_cluster_address=gcomm://%s:4567#g" %s'
             % (':4567,'.join(galera_ips), wsrep_conf))
def fix_wsrep_cluster_address():
    """Point wsrep_cluster_address on the first openstack node at the
    full galera member list (control-interface IPs, port 4567).

    Fix: the ostype comparison used 'Ubuntu', while detect_ostype() is
    compared against lowercase values everywhere else in this file
    ('ubuntu' in the detach/wsrep/cmon tasks), so the wsrep.cnf branch
    could never fire and the default my.cnf was edited instead.
    """
    openstack_host_list = [get_control_host_string(openstack_host)
                           for openstack_host in env.roledefs['openstack']]
    galera_ip_list = [hstr_to_ip(galera_host)
                      for galera_host in openstack_host_list]
    with settings(host_string=env.roledefs['openstack'][0],
                  password=env.passwords[env.roledefs['openstack'][0]]):
        wsrep_conf = '/etc/mysql/my.cnf'
        # Ubuntu keeps wsrep settings in a dedicated conf.d file.
        if detect_ostype() in ['ubuntu']:
            wsrep_conf = '/etc/mysql/conf.d/wsrep.cnf'
        run('sed -ibak "s#wsrep_cluster_address=.*#wsrep_cluster_address=gcomm://%s:4567#g" %s'
            % (':4567,'.join(galera_ip_list), wsrep_conf))
def issu_contrail_generate_moreconf(final_conf):
    """Populate the DEFAULTS section of *final_conf* for the contrail
    ISSU tooling: per-role ip->hostname maps, new API credentials and
    the auth/api server addresses."""
    sudo('touch %s' % (final_conf))
    base = 'openstack-config --set %s DEFAULTS' % (final_conf)

    def _ctrl_ip(host):
        # Control-interface IP of a roledef host string.
        return hstr_to_ip(get_control_host_string(host))

    def _set(key, value):
        sudo('%s %s %s' % (base, key, value))

    # new_api_info carries root credentials for every new config node.
    api_entries = ','.join(["'%s':['root', '%s']"
                            % (_ctrl_ip(h), env.passwords[h])
                            for h in env.roledefs['cfgm']])
    _set('new_api_info', '"{' + api_entries + '}"')
    # ip -> real hostname maps, one per role.
    for key, role in (('db_host_info', 'database'),
                      ('config_host_info', 'cfgm'),
                      ('analytics_host_info', 'collector'),
                      ('control_host_info', 'control')):
        entries = ','.join(["'%s':'%s'"
                            % (_ctrl_ip(h), get_real_hostname(h))
                            for h in env.roledefs[role]])
        _set(key, '"{' + entries + '}"')
    admin_user, admin_password = get_authserver_credentials()
    _set('admin_password', admin_password)
    _set('admin_user', admin_user)
    _set('admin_tenant_name', get_admin_tenant_name())
    _set('openstack_ip', get_authserver_ip())
    _set('api_server_ip', _ctrl_ip(env.roledefs['cfgm'][0]))
def fixup_restart_haproxy_in_collector_node(*args):
    """Regenerate the collector section of haproxy.cfg on each node in
    *args* and restart haproxy there.

    Improvements: removed the unused server_index/mgmt_host_ip locals
    and the per-collector loop that recomputed the identical template
    substitution; the snippet is rendered once.
    """
    contrail_analytics_api_server_lines = ""
    space = " " * 3
    # One backend line per collector, keyed on its control-interface IP.
    for host_string in env.roledefs["collector"]:
        host_ip = hstr_to_ip(get_control_host_string(host_string))
        contrail_analytics_api_server_lines += "%s server %s %s:9081 check inter 2000 rise 2 fall 3\n" % (
            space,
            host_ip,
            host_ip,
        )
    # Render once; the result is identical for every target node.
    haproxy_config = collector_haproxy.template.safe_substitute(
        {
            "__contrail_analytics_api_backend_servers__": contrail_analytics_api_server_lines,
            "__contrail_hap_user__": "haproxy",
            "__contrail_hap_passwd__": "contrail123",
        }
    )
    for host_string in args:
        with settings(host_string=host_string):
            # chop old settings including pesky default from pkg...
            tmp_fname = "/tmp/haproxy-%s-config" % (host_string)
            get_as_sudo("/etc/haproxy/haproxy.cfg", tmp_fname)
            with settings(warn_only=True):
                local(
                    "sed -i -e '/^#contrail-collector-marker-start/,/^#contrail-collector-marker-end/d' %s"
                    % (tmp_fname)
                )
                local("sed -i -e 's/frontend\s*main\s*\*:5000/frontend main *:5001/' %s" % (tmp_fname))
                local("sed -i -e 's/ssl-relay 0.0.0.0:8443/ssl-relay 0.0.0.0:5002/' %s" % (tmp_fname))
                local("sed -i -e 's/option\shttplog/option tcplog/' %s" % (tmp_fname))
                local("sed -i -e 's/maxconn 4096/maxconn 100000/' %s" % (tmp_fname))
                # Remove default HA config
                local("sed -i '/listen\sappli1-rewrite/,/rspidel/d' %s" % tmp_fname)
                local("sed -i '/listen\sappli3-relais/,/rspidel/d' %s" % tmp_fname)
            # ...append the freshly generated section
            cfg_file = open(tmp_fname, "a")
            cfg_file.write(haproxy_config)
            cfg_file.close()
            put(tmp_fname, "/etc/haproxy/haproxy.cfg", use_sudo=True)
            local("rm %s" % (tmp_fname))
        # haproxy enable
        with settings(host_string=host_string, warn_only=True):
            sudo("chkconfig haproxy on")
            enable_haproxy()
            sudo("service haproxy restart")
def detach_vrouter_node(*args):
    """Detaches one/more compute node from the existing cluster.

    Fix: ``settings(pasword=...)`` was a typo -- the cfgm password was
    never applied to the session; it is now passed as ``password``.
    """
    cfgm_host = get_control_host_string(env.roledefs['cfgm'][0])
    cfgm_host_password = env.passwords[env.roledefs['cfgm'][0]]
    cfgm_ip = hstr_to_ip(cfgm_host)
    for host_string in args:
        # Resolve the compute's short hostname from its IP.
        compute_hostname = socket.gethostbyaddr(
            hstr_to_ip(host_string))[0].split('.')[0]
        with settings(host_string=host_string, warn_only=True):
            run("service supervisor-vrouter stop")
        # Unprovision the vrouter from the config API on the cfgm node.
        with settings(host_string=cfgm_host, password=cfgm_host_password):
            run("python /opt/contrail/utils/provision_vrouter.py --host_name %s --host_ip %s --api_server_ip %s --oper del"
                % (compute_hostname, host_string.split('@')[1], cfgm_ip))
    execute("restart_control")
def setup_apiserver_ssl_certs_node(*nodes):
    """Provision the contrail API-server SSL cert, key, CA and bundle on
    each node in *nodes*.

    For every artifact whose configured path equals the default path
    ("auto-generate" mode), the first cfgm node creates the files and
    the remaining cfgm nodes wait for them to appear there and copy them
    over; a non-default (user-supplied) path is pushed from the local
    machine instead.
    """
    default_certfile = '/etc/contrail/ssl/certs/contrail.pem'
    default_keyfile = '/etc/contrail/ssl/private/contrail.key'
    default_cafile = '/etc/contrail/ssl/certs/contrail_ca.pem'
    contrailcertbundle = get_apiserver_cert_bundle()
    # (configured path, default path) per artifact.
    ssl_certs = ((get_apiserver_certfile(), default_certfile),
                 (get_apiserver_keyfile(), default_keyfile),
                 (get_apiserver_cafile(), default_cafile))
    # 1-based rank of the current host among the cfgm nodes.
    index = env.roledefs['cfgm'].index(env.host_string) + 1
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    # Clear old certificate
                    sudo('rm -f %s' % ssl_cert)
            sudo('rm -f %s' % contrailcertbundle)
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    cfgm_host = env.roledefs['cfgm'][0]
                    if index == 1:
                        if not exists(ssl_cert, use_sudo=True):
                            print "Creating apiserver SSL certs in first cfgm node"
                            # Certs are issued for the contrail VIP when
                            # present, else the first cfgm's control IP.
                            cfgm_ip = get_contrail_internal_vip() or hstr_to_ip(get_control_host_string(cfgm_host))
                            sudo('create-api-ssl-certs.sh %s' % cfgm_ip)
                    else:
                        with settings(host_string=cfgm_host, password=get_env_passwords(cfgm_host)):
                            # Poll until the first cfgm generated it.
                            while not exists(ssl_cert, use_sudo=True):
                                print "Wait for SSL certs to be created in first cfgm"
                                sleep(0.1)
                            print "Get SSL cert(%s) from first cfgm" % ssl_cert
                            tmp_dir= tempfile.mkdtemp()
                            tmp_fname = os.path.join(tmp_dir, os.path.basename(ssl_cert))
                            get_as_sudo(ssl_cert, tmp_fname)
                        print "Copy to this(%s) cfgm node" % env.host_string
                        sudo('mkdir -p /etc/contrail/ssl/certs/')
                        sudo('mkdir -p /etc/contrail/ssl/private/')
                        put(tmp_fname, ssl_cert, use_sudo=True)
                        os.remove(tmp_fname)
                elif os.path.isfile(ssl_cert):
                    print "Certificate (%s) exists locally" % ssl_cert
                    put(ssl_cert, default, use_sudo=True)
                elif exists(ssl_cert, use_sudo=True):
                    print "Certificate (%s) exists in cfgm node" % ssl_cert
                else:
                    raise RuntimeError("%s doesn't exists locally or in cfgm node" % ssl_cert)
            if not exists(contrailcertbundle, use_sudo=True):
                # Bundle = cert + CA concatenated.
                ((certfile, _), (keyfile, _), (cafile, _)) = ssl_certs
                sudo('cat %s %s > %s' % (certfile, cafile, contrailcertbundle))
            sudo("chown -R contrail:contrail /etc/contrail/ssl")
def setup_cmon_schema(): """Task to configure cmon schema in the openstack nodes to monitor galera cluster""" if len(env.roledefs['openstack']) <= 1: print "Single Openstack cluster, skipping cmon schema setup." return openstack_host_list = [get_control_host_string(openstack_host)\ for openstack_host in env.roledefs['openstack']] galera_ip_list = [hstr_to_ip(galera_host)\ for galera_host in openstack_host_list] internal_vip = get_openstack_internal_vip() mysql_token = sudo("cat /etc/contrail/mysql.token") pdist = detect_ostype() if pdist in ['ubuntu']: mysql_svc = 'mysql' elif pdist in ['centos', 'redhat']: mysql_svc = 'mysqld' # Create cmon schema sudo('mysql -u root -p%s -e "CREATE SCHEMA IF NOT EXISTS cmon"' % mysql_token) sudo('mysql -u root -p%s < /usr/local/cmon/share/cmon/cmon_db.sql' % mysql_token) sudo('mysql -u root -p%s < /usr/local/cmon/share/cmon/cmon_data.sql' % mysql_token) # insert static data sudo( 'mysql -u root -p%s -e "use cmon; insert into cluster(type) VALUES (\'galera\')"' % mysql_token) host_list = galera_ip_list + ['localhost', '127.0.0.1', internal_vip] # Create cmon user for host in host_list: mysql_cmon_user_cmd = 'mysql -u root -p%s -e "CREATE USER \'cmon\'@\'%s\' IDENTIFIED BY \'cmon\'"' % ( mysql_token, host) with settings(hide('everything'), warn_only=True): sudo(mysql_cmon_user_cmd) mysql_cmd = "mysql -uroot -p%s -e" % mysql_token # Grant privilages for cmon user. for host in host_list: sudo( '%s "GRANT ALL PRIVILEGES on *.* TO cmon@%s IDENTIFIED BY \'cmon\' WITH GRANT OPTION"' % (mysql_cmd, host)) # Restarting mysql in all openstack nodes for host_string in env.roledefs['openstack']: with settings(host_string=host_string): sudo("service %s restart" % mysql_svc)
def setup_galera_cluster():
    """Task to cluster the openstack nodes with galera"""
    # Nothing to cluster with a single openstack node.
    if len(env.roledefs['openstack']) <= 1:
        print "Single Openstack cluster, skipping galera cluster setup."
        return

    # Only the first openstack node distributes ssh keys to the peers.
    if env.roledefs['openstack'].index(env.host_string) == 0:
        execute('setup_passwordless_ssh', *env.roledefs['openstack'])

    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)
    openstack_host_list = [get_control_host_string(openstack_host)\
                           for openstack_host in env.roledefs['openstack']]
    galera_ip_list = [hstr_to_ip(galera_host)\
                      for galera_host in openstack_host_list]
    keystone_ip = get_keystone_ip()
    internal_vip = get_openstack_internal_vip()

    with cd(INSTALLER_DIR):
        # --openstack_index is 1-based: this node's position in the
        # openstack role list.
        sudo("setup-vnc-galera\
            --self_ip %s --keystone_ip %s --galera_ip_list %s\
            --internal_vip %s --openstack_index %d" % (
                self_ip, keystone_ip, ' '.join(galera_ip_list),
                internal_vip, (openstack_host_list.index(self_host) + 1)))
def setup_galera_cluster():
    """Task to cluster the openstack nodes with galera"""
    # A single-node openstack deployment needs no galera clustering.
    if len(env.roledefs['openstack']) <= 1:
        print "Single Openstack cluster, skipping galera cluster setup."
        return

    # The first openstack node sets up passwordless ssh to all peers.
    if env.roledefs['openstack'].index(env.host_string) == 0:
        execute('setup_passwordless_ssh', *env.roledefs['openstack'])

    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)
    openstack_host_list = [get_control_host_string(openstack_host)\
                           for openstack_host in env.roledefs['openstack']]
    galera_ip_list = [hstr_to_ip(galera_host)\
                      for galera_host in openstack_host_list]
    keystone_ip = get_keystone_ip()
    internal_vip = get_openstack_internal_vip()

    with cd(INSTALLER_DIR):
        # --openstack_index is this node's 1-based position in the
        # openstack role list.
        sudo("setup-vnc-galera\
            --self_ip %s --keystone_ip %s --galera_ip_list %s\
            --internal_vip %s --openstack_index %d" % (self_ip,
                keystone_ip, ' '.join(galera_ip_list), internal_vip,
                (openstack_host_list.index(self_host) + 1)))
def rabbitmq_env():
    """Render rabbitmq-env.conf for this host and install it in place."""
    rabbit_env_conf = "/etc/rabbitmq/rabbitmq-env.conf"
    with settings(host_string=env.host_string,
                  password=get_env_passwords(env.host_string)):
        # The Erlang node name combines the short hostname with the
        # control-interface suffix.
        short_host = sudo("hostname -s") + ctrl
        erl_node_name = "rabbit@%s" % (short_host)
        control_ip = hstr_to_ip(get_control_host_string(env.host_string))
        rendered = rabbitmq_env_conf.template.safe_substitute(
            {"__erl_node_ip__": control_ip,
             "__erl_node_name__": erl_node_name}
        )
        tmp_fname = "/tmp/rabbitmq-env-%s.conf" % env.host_string
        with open(tmp_fname, "w") as cfg_file:
            cfg_file.write(rendered)
        put(tmp_fname, rabbit_env_conf, use_sudo=True)
        local("rm %s" % (tmp_fname))
def issu_contrail_switch_collector_in_compute_node(*args):
    """Repoint agent/nodemgr collector lists on the given compute nodes."""
    for host in args:
        # Space-terminated "<ctrl-ip>:8086 " entry per collector node.
        collector_list = ''.join(
            "%s:8086 " % (hstr_to_ip(get_control_host_string(collector)))
            for collector in env.roledefs['collector'])
        with settings(host_string=host):
            with settings(warn_only=True):
                tor_listing = sudo('ls /etc/contrail/contrail-tor-agent*')
            # Tor-agent configs are optional; the vrouter agent config is
            # always updated.
            config_files = tor_listing.split() if tor_listing.succeeded else []
            config_files.append('/etc/contrail/contrail-vrouter-agent.conf')
            for cfile in config_files:
                run('openstack-config --set %s DEFAULT collectors "%s"' % (cfile, collector_list))
            run('openstack-config --set /etc/contrail/contrail-vrouter-nodemgr.conf COLLECTOR server_list "%s"' % (collector_list))
def rabbitmq_env():
    """Generate rabbitmq-env.conf for this node and push it into place."""
    rabbit_env_conf = '/etc/rabbitmq/rabbitmq-env.conf'
    with settings(host_string=env.host_string,
                  password=get_env_passwords(env.host_string)):
        # Node name: short hostname plus control-interface suffix.
        erl_node_name = "rabbit@%s" % (sudo('hostname -s') + ctrl)
        substitutions = {
            '__erl_node_ip__': hstr_to_ip(get_control_host_string(env.host_string)),
            '__erl_node_name__': erl_node_name,
        }
        rmq_env_conf = rabbitmq_env_conf.template.safe_substitute(substitutions)
        tmp_fname = "/tmp/rabbitmq-env-%s.conf" % env.host_string
        with open(tmp_fname, 'w') as out:
            out.write(rmq_env_conf)
        put(tmp_fname, rabbit_env_conf, use_sudo=True)
        local("rm %s" % (tmp_fname))
def rabbitmq_env():
    """Generate rabbitmq-env.conf for this node and copy it into place."""
    rabbit_env_conf = '/etc/rabbitmq/rabbitmq-env.conf'
    with settings(host_string=env.host_string,
                  password=env.passwords[env.host_string]):
        # Erlang node name is derived from the short hostname plus the
        # control-interface suffix.
        node_host = run('hostname -s') + ctrl
        erl_node_name = "rabbit@%s" % (node_host)
        rendered = rabbitmq_env_conf.template.safe_substitute({
            '__erl_node_name__': erl_node_name,
            '__erl_node_ip__': hstr_to_ip(get_control_host_string(env.host_string)),
        })
        tmp_fname = "/tmp/rabbitmq-env-%s.conf" % env.host_string
        with open(tmp_fname, 'w') as out:
            out.write(rendered)
        put(tmp_fname, rabbit_env_conf)
        local("rm %s" % (tmp_fname))
def detach_vrouter_node(*args):
    """Detaches one/more compute node from the existing cluster.

    Stops the vrouter and nova-compute services on each node, unprovisions
    the vrouter from the API server (via the first cfgm), then restarts
    the control nodes.
    """
    cfgm_host = get_control_host_string(env.roledefs['cfgm'][0])
    cfgm_host_password = get_env_passwords(env.roledefs['cfgm'][0])
    cfgm_ip = hstr_to_ip(cfgm_host)

    # Ubuntu packages the compute service as 'nova-compute'.
    nova_compute = "openstack-nova-compute"
    if detect_ostype() in ['ubuntu']:
        nova_compute = "nova-compute"

    for host_string in args:
        # Short hostname registered for this compute node.
        compute_hostname = socket.gethostbyaddr(hstr_to_ip(host_string))[0].split('.')[0]
        with settings(host_string=host_string, warn_only=True):
            sudo("service supervisor-vrouter stop")
            sudo("service %s stop" % nova_compute)
        # Bug fix: keyword was misspelled 'pasword', so the cfgm login
        # password was never applied to the fabric settings context.
        with settings(host_string=cfgm_host, password=cfgm_host_password):
            sudo("python /opt/contrail/utils/provision_vrouter.py --host_name %s --host_ip %s --api_server_ip %s --oper del %s" % (compute_hostname, host_string.split('@')[1], cfgm_ip, get_mt_opts()))
    execute("restart_control")
def config_rabbitmq():
    """Render /etc/rabbitmq/rabbitmq.config listing every cfgm node as a
    cluster member and push it to the current node.
    """
    rabbit_hosts = []
    for host_string in env.roledefs['cfgm']:
        with settings(host_string=host_string,
                      password=env.passwords[host_string]):
            host_name = run('hostname')
            rabbit_hosts.append("\'rabbit@%s\'" % host_name)
    rabbit_hosts = ', '.join(rabbit_hosts)
    rabbitmq_configs = rabbitmq_config.template.safe_substitute({
        '__control_intf_ip__' : hstr_to_ip(get_control_host_string(env.host_string)),
        '__rabbit_hosts__' : rabbit_hosts,
    })
    tmp_fname = "/tmp/rabbitmq_%s.config" % env.host_string
    # Bug fix: the file was opened in append mode ('a'), so content left
    # over from a previous run of this task would be pushed along with the
    # freshly rendered config. Write mode truncates first.
    cfg_file = open(tmp_fname, 'w')
    cfg_file.write(rabbitmq_configs)
    cfg_file.close()
    put(tmp_fname, "/etc/rabbitmq/rabbitmq.config")
    local("rm %s" %(tmp_fname))
def purge_node_from_rabbitmq_cluster(del_rabbitmq_node, role):
    """Remove one node from the RabbitMQ cluster of the given role
    ('openstack' or 'cfgm'), then reconfigure and restart the remaining
    members and verify cluster health.
    """
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'no' and\
       role == 'openstack':
        # We are not managing the RabbitMQ server. No-op.
        return
    if get_contrail_internal_vip() != get_openstack_internal_vip() and\
       role == 'cfgm':
        # Openstack and Contrail are in two different nodes. Cfgm
        # rabbitmq will point to the Openstack node. No-op.
        return
    env.roledefs['rabbit'] = env.roledefs[role]
    # NOTE(review): del_rabbitmq_ip is computed but unused below.
    del_rabbitmq_ip = hstr_to_ip(del_rabbitmq_node)
    del_rabbitmq_ctrl_ip = hstr_to_ip(get_control_host_string(del_rabbitmq_node))
    if ping_test(del_rabbitmq_node):
        # Node reachable: make it leave the cluster itself and disable its
        # rabbit identity (erlang cookie / config moved aside).
        with settings(host_string = del_rabbitmq_node, warn_only = True):
            sudo("rabbitmqctl stop_app")
            sudo("rabbitmqctl reset")
            sudo("service supervisor-support-service stop")
            sudo("mv /var/lib/rabbitmq/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie.removed")
            sudo("mv /etc/rabbitmq/rabbitmq.config /etc/rabbitmq/rabbitmq.config.removed")
    else:
        # If the node is not reachable, then delete the node remotely from one
        # of the nodes in the cluster.
        with settings(host_string = env.roledefs['rabbit'][0], warn_only = True):
            hostname = local('getent hosts %s | awk \'{print $3\'}' % del_rabbitmq_ctrl_ip, capture = True)
            sudo("rabbitmqctl forget_cluster_node rabbit@%s" % hostname)
    # Giving some time for the other nodes to re-adjust the cluster,
    time.sleep(30)
    execute(config_rabbitmq)
    for host_string in env.roledefs[role]:
        with settings(host_string = host_string):
            sudo("service rabbitmq-server restart")
    # Give time for RabbitMQ to recluster
    time.sleep(30)
    result = execute(verify_cluster_status)
    if False in result.values():
        print "Unable to recluster RabbitMQ cluster after removing the node %s" % del_rabbitmq_node
        exit(1)
def drop_analytics_keyspace_node(*args):
    """Drop the ContrailAnalytics (thrift) and ContrailAnalyticsCql
    keyspaces on each given database node via cqlsh.

    Commands are written to /tmp/cassandra_commands_file and fed to cqlsh
    with -f; a 'describe keyspace' probe guards the ContrailAnalytics drop
    so it is only attempted when the keyspace exists.
    """
    for host_string in args:
        with settings(host_string=host_string, warn_only=True):
            # cqlsh invocation against this node's control IP; the file
            # argument is appended per command below.
            CASSANDRA_CMD = 'cqlsh %s -f ' % hstr_to_ip(get_control_host_string(host_string))
            print "Dropping analytics keyspace.. this may take a while.."
            sudo("echo 'describe keyspace \"ContrailAnalytics\";' > /tmp/cassandra_commands_file")
            if sudo(CASSANDRA_CMD + '/tmp/cassandra_commands_file').succeeded:
                sudo("echo 'drop keyspace \"ContrailAnalytics\";' > /tmp/cassandra_commands_file")
                if not sudo(CASSANDRA_CMD + '/tmp/cassandra_commands_file').succeeded:
                    print "WARN: Drop keyspace ContrailAnalytics failed.."
                else:
                    print "INFO: keyspace ContrailAnalytics is dropped.."
                    print "INFO: if snapshots are created, manual deletion may be required to free up disk.."
            # The CQL keyspace is dropped unconditionally (no describe probe).
            sudo("echo 'drop keyspace \"ContrailAnalyticsCql\";' > /tmp/cassandra_commands_file")
            if not sudo(CASSANDRA_CMD + '/tmp/cassandra_commands_file').succeeded:
                print "WARN: Drop keyspace ContrailAnalyticsCql failed.."
            else:
                print "INFO: keyspace ContrailAnalyticsCql is dropped.."
                print "INFO: if snapshots are created, manual deletion may be required to free up disk.."
def setup_cmon_schema(): """Task to configure cmon schema in the openstack nodes to monitor galera cluster""" if len(env.roledefs['openstack']) <= 1: print "Single Openstack cluster, skipping cmon schema setup." return openstack_host_list = [get_control_host_string(openstack_host)\ for openstack_host in env.roledefs['openstack']] galera_ip_list = [hstr_to_ip(galera_host)\ for galera_host in openstack_host_list] internal_vip = get_openstack_internal_vip() mysql_token = run("cat /etc/contrail/mysql.token") pdist = detect_ostype() if pdist in ['Ubuntu']: mysql_svc = 'mysql' elif pdist in ['centos', 'redhat']: mysql_svc = 'mysqld' # Create cmon schema run('mysql -u root -p%s -e "CREATE SCHEMA IF NOT EXISTS cmon"' % mysql_token) run('mysql -u root -p%s < /usr/local/cmon/share/cmon/cmon_db.sql' % mysql_token) run('mysql -u root -p%s < /usr/local/cmon/share/cmon/cmon_data.sql' % mysql_token) # insert static data run('mysql -u root -p%s -e "use cmon; insert into cluster(type) VALUES (\'galera\')"' % mysql_token) host_list = galera_ip_list + ['localhost', '127.0.0.1', internal_vip] # Create cmon user for host in host_list: mysql_cmon_user_cmd = 'mysql -u root -p%s -e "CREATE USER \'cmon\'@\'%s\' IDENTIFIED BY \'cmon\'"' % ( mysql_token, host) with settings(hide('everything'),warn_only=True): run(mysql_cmon_user_cmd) mysql_cmd = "mysql -uroot -p%s -e" % mysql_token # Grant privilages for cmon user. for host in host_list: run('%s "GRANT ALL PRIVILEGES on *.* TO cmon@%s IDENTIFIED BY \'cmon\' WITH GRANT OPTION"' % (mysql_cmd, host)) # Restarting mysql in all openstack nodes for host_string in env.roledefs['openstack']: with settings(host_string=host_string): run("service %s restart" % mysql_svc)
def detach_vrouter_node(*args):
    """Detaches one/more compute node from the existing cluster.

    Stops the vrouter and nova-compute services on each node, unprovisions
    the vrouter from the API server (via the first cfgm), then restarts
    the control nodes.
    """
    cfgm_host = get_control_host_string(env.roledefs['cfgm'][0])
    cfgm_host_password = env.passwords[env.roledefs['cfgm'][0]]
    cfgm_ip = hstr_to_ip(cfgm_host)

    # Ubuntu packages the compute service as 'nova-compute'.
    nova_compute = "openstack-nova-compute"
    if detect_ostype() in ['ubuntu']:
        nova_compute = "nova-compute"

    for host_string in args:
        # Short hostname registered for this compute node.
        compute_hostname = socket.gethostbyaddr(
            hstr_to_ip(host_string))[0].split('.')[0]
        with settings(host_string=host_string, warn_only=True):
            sudo("service supervisor-vrouter stop")
            sudo("service %s stop" % nova_compute)
        # Bug fix: keyword was misspelled 'pasword', so the cfgm login
        # password was never applied to the fabric settings context.
        with settings(host_string=cfgm_host, password=cfgm_host_password):
            sudo(
                "python /opt/contrail/utils/provision_vrouter.py --host_name %s --host_ip %s --api_server_ip %s --oper del" %
                (compute_hostname, host_string.split('@')[1], cfgm_ip))
    execute("restart_control")
def setup_cmon_param_zkonupgrade():
    """Seed /etc/contrail/ha/cmon_param with zookeeper, keystone and cmon
    settings; each key is appended only if not already present."""
    cmon_param = "/etc/contrail/ha/cmon_param"
    zoo_ip_list = [hstr_to_ip(get_control_host_string(cassandra_host))
                   for cassandra_host in env.roledefs["database"]]
    zk_servers_ports = ",".join(["%s:2181" % (s) for s in zoo_ip_list])
    zks = 'ZK_SERVER_IP="%s"' % (zk_servers_ports)
    monitor_galera = "True" if get_contrail_internal_vip() else "False"
    # Assuming that keystone is the user and pass
    # if changed we need to fetch and update these fields
    keystone_db_user = "******"
    keystone_db_pass = "******"
    cmon_db_user = "******"
    cmon_db_pass = "******"
    # (marker key, full line to append when the key is missing)
    entries = (
        ('ZK_SERVER_IP', zks),
        ('OS_KS_USER', 'OS_KS_USER=%s' % keystone_db_user),
        ('OS_KS_PASS', 'OS_KS_PASS=%s' % keystone_db_pass),
        ('CMON_USER', 'CMON_USER=%s' % cmon_db_user),
        ('CMON_PASS', 'CMON_PASS=%s' % cmon_db_pass),
        ('MONITOR_GALERA', 'MONITOR_GALERA=%s' % monitor_galera),
    )
    for marker, line in entries:
        sudo("grep -q '%s' %s || echo '%s' >> %s"
             % (marker, cmon_param, line, cmon_param))
def fixup_restart_haproxy_in_collector_node(*args):
    """Regenerate the contrail-collector section of haproxy.cfg on the
    given nodes and restart haproxy.

    Builds one backend 'server' line per collector node (on its control
    IP), splices the rendered template into each target node's existing
    haproxy.cfg after stripping the old collector-marker section, then
    enables and restarts haproxy.
    """
    space = ' ' * 3
    contrail_analytics_api_server_lines = ''
    for host_string in env.roledefs['collector']:
        host_ip = hstr_to_ip(get_control_host_string(host_string))
        contrail_analytics_api_server_lines +=\
            '%s server %s %s:9081 check inter 2000 rise 2 fall 3\n'\
            % (space, host_ip, host_ip)

    # The rendered config is independent of the target host: compute it
    # once instead of once per collector node (the original loop never
    # used its loop variable). Unused server_index/mgmt_host_ip removed.
    haproxy_config = collector_haproxy.template.safe_substitute({
        '__contrail_analytics_api_backend_servers__':
            contrail_analytics_api_server_lines,
        '__contrail_hap_user__': 'haproxy',
        '__contrail_hap_passwd__': 'contrail123',
    })

    for host_string in args:
        with settings(host_string=host_string):
            # chop old settings including pesky default from pkg...
            tmp_fname = "/tmp/haproxy-%s-config" % (host_string)
            get("/etc/haproxy/haproxy.cfg", tmp_fname)
            with settings(warn_only=True):
                local("sed -i -e '/^#contrail-collector-marker-start/,/^#contrail-collector-marker-end/d' %s" % (tmp_fname))
                local("sed -i -e 's/ssl-relay 0.0.0.0:8443/ssl-relay 0.0.0.0:5002/' %s" % (tmp_fname))
                local("sed -i -e 's/option\shttplog/option tcplog/' %s" % (tmp_fname))
                local("sed -i -e 's/maxconn 4096/maxconn 100000/' %s" % (tmp_fname))
            # ...generate new ones (append after the old section was chopped)
            cfg_file = open(tmp_fname, 'a')
            cfg_file.write(haproxy_config)
            cfg_file.close()
            put(tmp_fname, "/etc/haproxy/haproxy.cfg")
            local("rm %s" % (tmp_fname))

        # haproxy enable
        with settings(host_string=host_string, warn_only=True):
            run("chkconfig haproxy on")
            enable_haproxy()
            run("service haproxy restart")
def purge_node_from_rabbitmq_cluster(del_rabbitmq_node, role):
    """Remove one node from the RabbitMQ cluster of the given role, then
    reconfigure and restart the remaining members and verify cluster
    health.
    """
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'no' and\
       role == 'openstack':
        # We are not managing the RabbitMQ server. No-op.
        return
    env.roledefs['rabbit'] = env.roledefs[role]
    # NOTE(review): del_rabbitmq_ip is computed but unused below.
    del_rabbitmq_ip = hstr_to_ip(del_rabbitmq_node)
    del_rabbitmq_ctrl_ip = hstr_to_ip(get_control_host_string(del_rabbitmq_node))
    if ping_test(del_rabbitmq_node):
        # Node reachable: make it leave the cluster itself and disable its
        # rabbit identity (erlang cookie / config moved aside).
        with settings(host_string = del_rabbitmq_node, warn_only = True):
            sudo("rabbitmqctl stop_app")
            sudo("rabbitmqctl reset")
            sudo("service supervisor-support-service stop")
            sudo("mv /var/lib/rabbitmq/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie.removed")
            sudo("mv /etc/rabbitmq/rabbitmq.config /etc/rabbitmq/rabbitmq.config.removed")
    else:
        # If the node is not reachable, then delete the node remotely from one
        # of the nodes in the cluster.
        with settings(host_string = env.roledefs['rabbit'][0], warn_only = True):
            hostname = local('getent hosts %s | awk \'{print $3\'}' % del_rabbitmq_ctrl_ip, capture = True)
            sudo("rabbitmqctl forget_cluster_node rabbit@%s" % hostname)
    # Giving some time for the other nodes to re-adjust the cluster,
    time.sleep(30)
    execute(config_rabbitmq)
    for host_string in env.roledefs[role]:
        with settings(host_string = host_string):
            sudo("service rabbitmq-server restart")
    # Give time for RabbitMQ to recluster
    time.sleep(30)
    result = execute(verify_cluster_status)
    if False in result.values():
        print "Unable to recluster RabbitMQ cluster after removing the node %s" % del_rabbitmq_node
        exit(1)
def setup_cmon_schema(): """Task to configure cmon schema in the openstack nodes to monitor galera cluster""" if len(env.roledefs["openstack"]) <= 1: print "Single Openstack cluster, skipping cmon schema setup." return openstack_host_list = [get_control_host_string(openstack_host) for openstack_host in env.roledefs["openstack"]] galera_ip_list = [hstr_to_ip(galera_host) for galera_host in openstack_host_list] internal_vip = get_openstack_internal_vip() mysql_token = sudo("cat /etc/contrail/mysql.token") pdist = detect_ostype() if pdist in ["ubuntu"]: mysql_svc = "mysql" elif pdist in ["centos", "redhat"]: mysql_svc = "mysqld" # Create cmon schema sudo('mysql -u root -p%s -e "CREATE SCHEMA IF NOT EXISTS cmon"' % mysql_token) sudo("mysql -u root -p%s < /usr/local/cmon/share/cmon/cmon_db.sql" % mysql_token) sudo("mysql -u root -p%s < /usr/local/cmon/share/cmon/cmon_data.sql" % mysql_token) # insert static data sudo("mysql -u root -p%s -e \"use cmon; insert into cluster(type) VALUES ('galera')\"" % mysql_token) host_list = galera_ip_list + ["localhost", "127.0.0.1", internal_vip] # Create cmon user for host in host_list: mysql_cmon_user_cmd = "mysql -u root -p%s -e \"CREATE USER 'cmon'@'%s' IDENTIFIED BY 'cmon'\"" % ( mysql_token, host, ) with settings(hide("everything"), warn_only=True): sudo(mysql_cmon_user_cmd) mysql_cmd = "mysql -uroot -p%s -e" % mysql_token # Grant privilages for cmon user. for host in host_list: sudo("%s \"GRANT ALL PRIVILEGES on *.* TO cmon@%s IDENTIFIED BY 'cmon' WITH GRANT OPTION\"" % (mysql_cmd, host))
def config_rabbitmq(): rabbit_hosts = [] rabbit_conf = "/etc/rabbitmq/rabbitmq.config" if len(env.roledefs["rabbit"]) <= 1 and detect_ostype() == "redhat": print "CONFIG_RABBITMQ: Skip creating rabbitmq.config for Single node setup" return for host_string in env.roledefs["rabbit"]: with settings(host_string=host_string, password=get_env_passwords(host_string)): host_name = sudo("hostname -s") + ctrl rabbit_hosts.append("'rabbit@%s'" % host_name) rabbit_hosts = ", ".join(rabbit_hosts) rabbitmq_config_template = rabbitmq_config if len(env.roledefs["rabbit"]) == 1: rabbitmq_config_template = rabbitmq_config_single_node rabbitmq_configs = rabbitmq_config_template.template.safe_substitute( {"__control_intf_ip__": hstr_to_ip(get_control_host_string(env.host_string)), "__rabbit_hosts__": rabbit_hosts} ) tmp_fname = "/tmp/rabbitmq_%s.config" % env.host_string cfg_file = open(tmp_fname, "w") cfg_file.write(rabbitmq_configs) cfg_file.close() put(tmp_fname, "/etc/rabbitmq/rabbitmq.config", use_sudo=True) local("rm %s" % (tmp_fname))
def configure_test_env(contrail_fab_path='/opt/contrail/utils', test_dir='/contrail-test'): """ Configure test environment by creating sanity_params.ini and sanity_testbed.json files """ print "Configuring test environment" sys.path.insert(0, contrail_fab_path) from fabfile.config import testbed from fabfile.utils.host import get_openstack_internal_vip, \ get_control_host_string, get_authserver_ip, get_admin_tenant_name, \ get_authserver_port, get_env_passwords, get_authserver_credentials, \ get_vcenter_ip, get_vcenter_port, get_vcenter_username, \ get_vcenter_password, get_vcenter_datacenter, get_vcenter_compute, \ get_authserver_protocol, get_region_name, get_contrail_internal_vip, \ get_openstack_external_vip, get_contrail_external_vip, \ get_apiserver_protocol, get_apiserver_certfile, get_apiserver_keyfile, \ get_apiserver_cafile, get_keystone_insecure_flag, \ get_apiserver_insecure_flag, get_keystone_certfile, get_keystone_keyfile, \ get_keystone_cafile, get_keystone_version from fabfile.utils.multitenancy import get_mt_enable from fabfile.utils.interface import get_data_ip from fabfile.tasks.install import update_config_option, update_js_config from fabfile.utils.fabos import get_as_sudo logger = contrail_logging.getLogger(__name__) def validate_and_copy_file(filename, source_host): with settings(host_string='%s' %(source_host), warn_only=True, abort_on_prompts=False): if exists(filename): filedir = os.path.dirname(filename) if not os.path.exists(filedir): os.makedirs(filedir) get_as_sudo(filename, filename) return filename return "" cfgm_host = env.roledefs['cfgm'][0] auth_protocol = get_authserver_protocol() try: auth_server_ip = get_authserver_ip() except Exception: auth_server_ip = None auth_server_port = get_authserver_port() api_auth_protocol = get_apiserver_protocol() if api_auth_protocol == 'https': api_certfile = validate_and_copy_file(get_apiserver_certfile(), cfgm_host) api_keyfile = validate_and_copy_file(get_apiserver_keyfile(), cfgm_host) 
api_cafile = validate_and_copy_file(get_apiserver_cafile(), cfgm_host) api_insecure_flag = get_apiserver_insecure_flag() else: api_certfile = "" api_keyfile = "" api_cafile = "" api_insecure_flag = True cert_dir = os.path.dirname(api_certfile) if auth_protocol == 'https': keystone_cafile = validate_and_copy_file(cert_dir + '/' +\ os.path.basename(get_keystone_cafile()), cfgm_host) keystone_certfile = validate_and_copy_file(cert_dir + '/' +\ os.path.basename(get_keystone_certfile()), cfgm_host) keystone_keyfile = keystone_certfile keystone_insecure_flag = istrue(os.getenv('OS_INSECURE', \ get_keystone_insecure_flag())) else: keystone_certfile = "" keystone_keyfile = "" keystone_cafile = "" keystone_insecure_flag = True with settings(warn_only=True), hide('everything'): with lcd(contrail_fab_path): if local('git branch').succeeded: fab_revision = local('git log --format="%H" -n 1', capture=True) else: with settings(host_string=cfgm_host), hide('everything'): fab_revision = run('cat /opt/contrail/contrail_packages/VERSION') with lcd(test_dir): if local('git branch').succeeded: revision = local('git log --format="%H" -n 1', capture=True) else: with settings(host_string=cfgm_host), hide('everything'): revision = run('cat /opt/contrail/contrail_packages/VERSION') sanity_testbed_dict = { 'hosts': [], 'vgw': [], 'esxi_vms':[], 'vcenter_servers':[], 'hosts_ipmi': [], 'tor':[], 'sriov':[], 'dpdk':[], 'ns_agilio_vrouter':[], } sample_ini_file = test_dir + '/' + 'sanity_params.ini.sample' with open(sample_ini_file, 'r') as fd_sample_ini: contents_sample_ini = fd_sample_ini.read() sanity_ini_templ = string.Template(contents_sample_ini) if not getattr(env, 'test', None): env.test={} containers = env.test.get('containers') traffic_data = env.test.get('traffic_data') ixia_linux_host_ip = get_value_of_key(traffic_data, 'ixia_linux_host_ip') ixia_host_ip = get_value_of_key(traffic_data, 'ixia_host_ip') spirent_linux_host_ip = get_value_of_key(traffic_data, 'spirent_linux_host_ip') 
ixia_linux_username = get_value_of_key(traffic_data, 'ixia_linux_username') ixia_linux_password = get_value_of_key(traffic_data, 'ixia_linux_password') spirent_linux_username = get_value_of_key(traffic_data, 'spirent_linux_username') spirent_linux_password = get_value_of_key(traffic_data, 'spirent_linux_password') if env.get('orchestrator', 'openstack') == 'openstack': with settings(host_string = env.roledefs['openstack'][0]), hide('everything'): openstack_host_name = run("hostname") with settings(host_string = env.roledefs['cfgm'][0]), hide('everything'): cfgm_host_name = run("hostname") control_host_names = [] for control_host in env.roledefs['control']: with settings(host_string = control_host), hide('everything'): host_name = run("hostname") control_host_names.append(host_name) cassandra_host_names = [] if 'database' in env.roledefs.keys(): for cassandra_host in env.roledefs['database']: with settings(host_string = cassandra_host), hide('everything'): host_name = run("hostname") cassandra_host_names.append(host_name) keystone_version = get_keystone_version() internal_vip = get_openstack_internal_vip() external_vip = get_openstack_external_vip() contrail_internal_vip = get_contrail_internal_vip() contrail_external_vip = get_contrail_external_vip() multi_role_test = False for host_string in env.roledefs['all']: if host_string in env.roledefs.get('test',[]): for role in env.roledefs.iterkeys(): if role in ['test','all']: continue if host_string in env.roledefs.get(role,[]): multi_role_test=True break if not multi_role_test: continue host_ip = host_string.split('@')[1] with settings(host_string = host_string), hide('everything'): try: host_name = run("hostname") host_fqname = run("hostname -f") except: logger.warn('Unable to login to %s'%host_ip) continue host_dict = {} host_dict['ip'] = host_ip host_dict['data-ip']= get_data_ip(host_string)[0] if host_dict['data-ip'] == host_string.split('@')[1]: host_dict['data-ip'] = get_data_ip(host_string)[0] 
host_dict['control-ip']= get_control_host_string(host_string).split('@')[1] host_dict['name'] = host_name host_dict['fqname'] = host_fqname host_dict['username'] = host_string.split('@')[0] host_dict['password'] =get_env_passwords(host_string) host_dict['roles'] = [] if env.get('qos', {}): if host_string in env.qos.keys(): role_dict = env.qos[host_string] host_dict['qos'] = role_dict if env.get('qos_niantic', {}): if host_string in env.qos_niantic.keys(): role_dict = env.qos_niantic[host_string] host_dict['qos_niantic'] = role_dict if host_string in env.roledefs['openstack']: role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}} role_dict['container'] = get_container_name(containers, host_string, 'openstack') host_dict['roles'].append(role_dict) if host_string in env.roledefs['cfgm']: role_dict = {'type': 'cfgm', 'params': {'collector': host_name, 'cassandra': ' '.join(cassandra_host_names)}} role_dict['container'] = get_container_name(containers, host_string, 'controller') if env.get('orchestrator', 'openstack') == 'openstack': role_dict['openstack'] = openstack_host_name host_dict['roles'].append(role_dict) if host_string in env.roledefs['control']: role_dict = {'type': 'bgp', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}} role_dict['container'] = get_container_name(containers, host_string, 'controller') host_dict['roles'].append(role_dict) if 'database' in env.roledefs.keys() and host_string in env.roledefs['database']: role_dict = { 'type': 'database', 'params': {'cassandra': ' '.join(cassandra_host_names)} } role_dict['container'] = get_container_name(containers, host_string, 'analyticsdb') host_dict['roles'].append(role_dict) if not env.roledefs.get('compute'): env.roledefs['compute'] = [] if host_string in env.roledefs['compute']: role_dict = {'type': 'compute', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}} role_dict['container'] = get_container_name(containers, host_string, 'agent') 
role_dict['params']['bgp'] = [] if len(env.roledefs['control']) == 1: role_dict['params']['bgp'] = control_host_names else: for control_node in control_host_names: role_dict['params']['bgp'].append(control_node) # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))]) host_dict['roles'].append(role_dict) if host_string in env.roledefs.get('lb',[]): role_dict = {'type': 'lb', 'params': {'lb': host_name}} role_dict['container'] = get_container_name(containers, host_string, 'lb') host_dict['roles'].append(role_dict) if 'collector' in env.roledefs.keys() and host_string in env.roledefs['collector']: role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} } role_dict['container'] = get_container_name(containers, host_string, 'analytics') host_dict['roles'].append(role_dict) if 'webui' in env.roledefs.keys() and host_string in env.roledefs['webui']: role_dict = { 'type': 'webui', 'params': {'cfgm': cfgm_host_name} } role_dict['container'] = get_container_name(containers, host_string, 'controller') host_dict['roles'].append(role_dict) # Kube managers if 'contrail-kubernetes' in env.roledefs.keys() and \ host_string in env.roledefs['contrail-kubernetes']: role_dict = { 'type': 'contrail-kubernetes', 'params': {} } role_dict['container'] = get_container_name(containers, host_string, 'contrail-kube-manager') host_dict['roles'].append(role_dict) sanity_testbed_dict['hosts'].append(host_dict) if env.has_key('vgw'): sanity_testbed_dict['vgw'].append(env.vgw) #get sriov info if env.has_key('sriov'): sanity_testbed_dict['sriov'].append(env.sriov) #get dpdk info if env.has_key('dpdk'): sanity_testbed_dict['dpdk'].append(env.dpdk) #get k8s info sanity_testbed_dict['kubernetes'] = env.get('kubernetes', {}) #get ns_agilio_vrouter info if env.has_key('ns_agilio_vrouter'): sanity_testbed_dict['ns_agilio_vrouter'].append(env.ns_agilio_vrouter) # Read ToR config sanity_tor_dict = {} if env.has_key('tor_agent'): 
sanity_testbed_dict['tor_agent'] = env.tor_agent # Read any tor-host config if env.has_key('tor_hosts'): sanity_testbed_dict['tor_hosts'] = env.tor_hosts if env.has_key('xmpp_auth_enable'): sanity_testbed_dict['xmpp_auth_enable'] = env.xmpp_auth_enable if env.has_key('xmpp_dns_auth_enable'): sanity_testbed_dict['xmpp_dns_auth_enable'] = env.xmpp_dns_auth_enable if env.has_key('metadata_ssl_enable'): sanity_testbed_dict['metadata_ssl_enable'] = env.metadata_ssl_enable if env.has_key('dm_mx'): sanity_testbed_dict['dm_mx'] = env.dm_mx # Read any MX config (as physical_router ) if env.has_key('physical_routers'): sanity_testbed_dict['physical_routers'] = env.physical_routers esxi_hosts = getattr(testbed, 'esxi_hosts', None) if esxi_hosts: for esxi in esxi_hosts: host_dict = {} host_dict['ip'] = esxi_hosts[esxi]['ip'] host_dict['data-ip'] = host_dict['ip'] host_dict['control-ip'] = host_dict['ip'] host_dict['name'] = esxi host_dict['username'] = esxi_hosts[esxi]['username'] host_dict['password'] = esxi_hosts[esxi]['password'] #Its used for vcenter only mode provosioning for contrail-vm #Its not needed for vcenter_gateway mode, hence might not be there in testbed.py if 'contrail_vm' in esxi_hosts[esxi]: host_dict['contrail_vm'] = esxi_hosts[esxi]['contrail_vm']['host'] host_dict['roles'] = [] host_dict['type'] = 'esxi' sanity_testbed_dict['hosts'].append(host_dict) sanity_testbed_dict['esxi_vms'].append(host_dict) vcenter_servers = env.get('vcenter_servers') if vcenter_servers: for vcenter in vcenter_servers: sanity_testbed_dict['vcenter_servers'].append(vcenter_servers[vcenter]) orch = getattr(env, 'orchestrator', 'openstack') deployer = getattr(env, 'deployer', 'openstack') #get other orchestrators (vcenter etc) info if any slave_orch = None if env.has_key('other_orchestrators'): sanity_testbed_dict['other_orchestrators'] = env.other_orchestrators for k,v in env.other_orchestrators.items(): if v['type'] == 'vcenter': slave_orch = 'vcenter' # get host ipmi list if 
env.has_key('hosts_ipmi'): sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi) # Setting slave orch to k8s when key present if env.has_key('kubernetes'): if sanity_testbed_dict['kubernetes']['mode'] == 'nested': slave_orch = 'kubernetes' # generate json file and copy to cfgm sanity_testbed_json = json.dumps(sanity_testbed_dict) stack_user = os.getenv('STACK_USER', env.get('stack_user', env.test.get('stack_user', ''))) stack_password = os.getenv('STACK_PASSWORD', env.test.get('stack_password','')) stack_tenant = os.getenv('STACK_TENANT', env.get('stack_tenant', env.test.get('stack_tenant', ''))) stack_domain = os.getenv('STACK_DOMAIN', env.get('stack_domain', env.test.get('stack_domain', ''))) use_project_scoped_token = env.test.get('use_project_scoped_token', '') if not env.has_key('domain_isolation'): env.domain_isolation = False if not env.has_key('cloud_admin_domain'): env.cloud_admin_domain = 'Default' if not env.has_key('cloud_admin_user'): env.cloud_admin_user = '******' if not env.has_key('cloud_admin_password'): env.cloud_admin_password = env.get('openstack_admin_password') domain_isolation = os.getenv('DOMAIN_ISOLATION', env.test.get('domain_isolation', env.domain_isolation)) cloud_admin_domain = os.getenv('CLOUD_ADMIN_DOMAIN', env.test.get('cloud_admin_domain', env.cloud_admin_domain)) cloud_admin_user = os.getenv('CLOUD_ADMIN_USER', env.test.get('cloud_admin_user', env.cloud_admin_user)) cloud_admin_password = os.getenv('CLOUD_ADMIN_PASSWORD', env.test.get('cloud_admin_password', env.cloud_admin_password)) tenant_isolation = os.getenv('TENANT_ISOLATION', env.test.get('tenant_isolation', '')) stop_on_fail = env.get('stop_on_fail', False) mail_to = os.getenv('MAIL_TO', env.test.get('mail_to', '')) log_scenario = env.get('log_scenario', 'Sanity') stack_region_name = get_region_name() admin_user, admin_password = get_authserver_credentials() if orch == 'kubernetes': admin_tenant = 'default' else: admin_tenant = get_admin_tenant_name() # Few hardcoded 
variables for sanity environment # can be removed once we move to python3 and configparser webserver_host = os.getenv('WEBSERVER_HOST', env.test.get('webserver_host','')) webserver_user = os.getenv('WEBSERVER_USER', env.test.get('webserver_user', '')) webserver_password = os.getenv('WEBSERVER_PASSWORD', env.test.get('webserver_password', '')) webserver_log_path = os.getenv('WEBSERVER_LOG_PATH', env.test.get('webserver_log_path', '/var/www/contrail-test-ci/logs/')) webserver_report_path = os.getenv('WEBSERVER_REPORT_PATH', env.test.get('webserver_report_path', '/var/www/contrail-test-ci/reports/')) webroot = os.getenv('WEBROOT', env.test.get('webroot', 'contrail-test-ci')) mail_server = os.getenv('MAIL_SERVER', env.test.get('mail_server', '')) mail_port = os.getenv('MAIL_PORT', env.test.get('mail_port', '25')) fip_pool_name = os.getenv('FIP_POOL_NAME', env.test.get('fip_pool_name', 'floating-ip-pool')) public_virtual_network = os.getenv('PUBLIC_VIRTUAL_NETWORK', env.test.get('public_virtual_network', 'public')) public_tenant_name = os.getenv('PUBLIC_TENANT_NAME', env.test.get('public_tenant_name', 'admin')) fixture_cleanup = os.getenv('FIXTURE_CLEANUP', env.test.get('fixture_cleanup', 'yes')) generate_html_report = os.getenv('GENERATE_HTML_REPORT', env.test.get('generate_html_report', 'True')) keypair_name = os.getenv('KEYPAIR_NAME', env.test.get('keypair_name', 'contrail_key')) mail_sender = os.getenv('MAIL_SENDER', env.test.get('mail_sender', '*****@*****.**')) discovery_ip = os.getenv('DISCOVERY_IP', env.test.get('discovery_ip', '')) config_api_ip = os.getenv('CONFIG_API_IP', env.test.get('config_api_ip', '')) analytics_api_ip = os.getenv('ANALYTICS_API_IP', env.test.get('analytics_api_ip', '')) discovery_port = os.getenv('DISCOVERY_PORT', env.test.get('discovery_port', '')) config_api_port = os.getenv('CONFIG_API_PORT', env.test.get('config_api_port', '')) analytics_api_port = os.getenv('ANALYTICS_API_PORT', env.test.get('analytics_api_port', '')) control_port = 
os.getenv('CONTROL_PORT', env.test.get('control_port', '')) dns_port = os.getenv('DNS_PORT', env.test.get('dns_port', '')) agent_port = os.getenv('AGENT_PORT', env.test.get('agent_port', '')) user_isolation = os.getenv('USER_ISOLATION', env.test.get('user_isolation', False if stack_user else True)) neutron_username = os.getenv('NEUTRON_USERNAME', env.test.get('neutron_username', None)) availability_zone = os.getenv('AVAILABILITY_ZONE', env.test.get('availability_zone', None)) ci_flavor = os.getenv('CI_FLAVOR', env.test.get('ci_flavor', None)) kube_config_file = env.test.get('kube_config_file', '/etc/kubernetes/admin.conf') openshift_src_config_file = env.test.get('openshift_src_config_file', '/root/.kube/config') use_devicemanager_for_md5 = getattr(testbed, 'use_devicemanager_for_md5', False) router_asn = getattr(testbed, 'router_asn', '') public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '') public_vn_subnet = getattr(testbed, 'public_vn_subnet', '') ext_routers = getattr(testbed, 'ext_routers', '') router_info = str(ext_routers) fabric_gw = getattr(testbed, 'fabric_gw', '') fabric_gw_info = str(fabric_gw) test_verify_on_setup = getattr(env, 'test_verify_on_setup', True) webui = getattr(testbed, 'webui', False) horizon = getattr(testbed, 'horizon', False) ui_config = getattr(testbed, 'ui_config', False) ui_browser = getattr(testbed, 'ui_browser', False) if not env.has_key('openstack'): env.openstack = {} if not env.has_key('cfgm'): env.cfgm = {} config_amqp_ip = env.openstack.get('amqp_host', '') if config_amqp_ip: config_amqp_ips = [config_amqp_ip] else: config_amqp_ips = [] # If amqp details are in env.cfgm as well, use that config_amqp_port = env.cfgm.get('amqp_port', '5672') config_amqp_ips = env.cfgm.get('amqp_hosts', config_amqp_ips) key_filename = env.get('key_filename', '') pubkey_filename = env.get('pubkey_filename', '') vcenter_dc = '' if orch == 'vcenter' or slave_orch== 'vcenter': public_tenant_name='vCenter' if env.has_key('vcenter_servers'): if 
env.vcenter_servers: for vc in env.vcenter_servers: for dc in env.vcenter_servers[vc]['datacenters']: vcenter_dc = dc #global controller gc_host_mgmt = getattr(testbed, 'gc_host_mgmt', '') gc_host_control_data = getattr(testbed, 'gc_host_control_data', '') gc_user_name = getattr(testbed, 'gc_user_name', '') gc_user_pwd = getattr(testbed, 'gc_user_pwd', '') keystone_password = getattr(testbed, 'keystone_password', '') sanity_params = sanity_ini_templ.safe_substitute( {'__testbed_json_file__' : 'sanity_testbed.json', '__keystone_version__' : keystone_version, '__use_project_scoped_token__': use_project_scoped_token, '__nova_keypair_name__' : keypair_name, '__orch__' : orch, '__deployer__' : deployer, '__admin_user__' : admin_user, '__admin_password__' : admin_password, '__admin_tenant__' : admin_tenant, '__domain_isolation__' : domain_isolation, '__cloud_admin_domain__' : cloud_admin_domain, '__cloud_admin_user__' : cloud_admin_user, '__cloud_admin_password__': cloud_admin_password, '__tenant_isolation__' : tenant_isolation, '__stack_user__' : stack_user, '__stack_password__' : stack_password, '__auth_ip__' : auth_server_ip, '__auth_port__' : auth_server_port, '__auth_protocol__' : auth_protocol, '__stack_region_name__' : stack_region_name, '__stack_tenant__' : stack_tenant, '__stack_domain__' : stack_domain, '__multi_tenancy__' : get_mt_enable(), '__address_family__' : get_address_family(), '__log_scenario__' : log_scenario, '__generate_html_report__': generate_html_report, '__fixture_cleanup__' : fixture_cleanup, '__key_filename__' : key_filename, '__pubkey_filename__' : pubkey_filename, '__webserver__' : webserver_host, '__webserver_user__' : webserver_user, '__webserver_password__' : webserver_password, '__webserver_log_dir__' : webserver_log_path, '__webserver_report_dir__': webserver_report_path, '__webroot__' : webroot, '__mail_server__' : mail_server, '__mail_port__' : mail_port, '__sender_mail_id__' : mail_sender, '__receiver_mail_id__' : mail_to, 
'__http_proxy__' : env.get('http_proxy', ''), '__ui_browser__' : ui_browser, '__ui_config__' : ui_config, '__horizon__' : horizon, '__webui__' : webui, '__devstack__' : False, '__public_vn_rtgt__' : public_vn_rtgt, '__router_asn__' : router_asn, '__router_name_ip_tuples__': router_info, '__fabric_gw_name_ip_tuple__': fabric_gw_info, '__public_vn_name__' : fip_pool_name, '__public_virtual_network__':public_virtual_network, '__public_tenant_name__' :public_tenant_name, '__public_vn_subnet__' : public_vn_subnet, '__test_revision__' : revision, '__fab_revision__' : fab_revision, '__test_verify_on_setup__': test_verify_on_setup, '__stop_on_fail__' : stop_on_fail, '__ha_setup__' : getattr(testbed, 'ha_setup', ''), '__ipmi_username__' : getattr(testbed, 'ipmi_username', ''), '__ipmi_password__' : getattr(testbed, 'ipmi_password', ''), '__contrail_internal_vip__' : contrail_internal_vip, '__contrail_external_vip__' : contrail_external_vip, '__internal_vip__' : internal_vip, '__external_vip__' : external_vip, '__vcenter_dc__' : vcenter_dc, '__vcenter_server__' : get_vcenter_ip(), '__vcenter_port__' : get_vcenter_port(), '__vcenter_username__' : get_vcenter_username(), '__vcenter_password__' : get_vcenter_password(), '__vcenter_datacenter__' : get_vcenter_datacenter(), '__vcenter_compute__' : get_vcenter_compute(), '__use_devicemanager_for_md5__' : use_devicemanager_for_md5, '__discovery_port__' : discovery_port, '__config_api_port__' : config_api_port, '__analytics_api_port__' : analytics_api_port, '__control_port__' : control_port, '__dns_port__' : dns_port, '__vrouter_agent_port__' : agent_port, '__discovery_ip__' : discovery_ip, '__config_api_ip__' : config_api_ip, '__analytics_api_ip__' : analytics_api_ip, '__user_isolation__' : user_isolation, '__neutron_username__' : neutron_username, '__availability_zone__' : availability_zone, '__ci_flavor__' : ci_flavor, '__config_amqp_ips__' : ','.join(config_amqp_ips), '__config_amqp_port__' : config_amqp_port, 
'__api_auth_protocol__' : api_auth_protocol, '__api_certfile__' : api_certfile, '__api_keyfile__' : api_keyfile, '__api_cafile__' : api_cafile, '__api_insecure_flag__' : api_insecure_flag, '__keystone_certfile__' : keystone_certfile, '__keystone_keyfile__' : keystone_keyfile, '__keystone_cafile__' : keystone_cafile, '__keystone_insecure_flag__': keystone_insecure_flag, '__gc_host_mgmt__' : gc_host_mgmt, '__gc_host_control_data__': gc_host_control_data, '__gc_user_name__' : gc_user_name, '__gc_user_pwd__' : gc_user_pwd, '__keystone_password__' : keystone_password, '__slave_orch__' : slave_orch, '__ixia_linux_host_ip__' : ixia_linux_host_ip, '__ixia_host_ip__' : ixia_host_ip, '__spirent_linux_host_ip__': spirent_linux_host_ip, '__ixia_linux_username__' : ixia_linux_username, '__ixia_linux_password__' : ixia_linux_password, '__spirent_linux_username__': spirent_linux_username, '__spirent_linux_password__': spirent_linux_password, }) ini_file = test_dir + '/' + 'sanity_params.ini' testbed_json_file = test_dir + '/' + 'sanity_testbed.json' with open(ini_file, 'w') as ini: ini.write(sanity_params) with open(testbed_json_file,'w') as tb: tb.write(sanity_testbed_json) # Create /etc/contrail/openstackrc if not os.path.exists('/etc/contrail'): os.makedirs('/etc/contrail') keycertbundle = None if keystone_cafile and keystone_keyfile and keystone_certfile: bundle = '/tmp/keystonecertbundle.pem' certs = [keystone_certfile, keystone_keyfile, keystone_cafile] keycertbundle = utils.getCertKeyCaBundle(bundle, certs) with open('/etc/contrail/openstackrc','w') as rc: rc.write("export OS_USERNAME=%s\n" % admin_user) rc.write("export OS_PASSWORD=%s\n" % admin_password) rc.write("export OS_TENANT_NAME=%s\n" % admin_tenant) rc.write("export OS_REGION_NAME=%s\n" % stack_region_name) rc.write("export OS_AUTH_URL=%s://%s:%s/v2.0\n" % (auth_protocol, auth_server_ip, auth_server_port)) rc.write("export OS_CACERT=%s\n" % keycertbundle) rc.write("export OS_CERT=%s\n" % keystone_certfile) 
rc.write("export OS_KEY=%s\n" % keystone_keyfile) rc.write("export OS_INSECURE=%s\n" % keystone_insecure_flag) rc.write("export OS_NO_CACHE=1\n") # Write vnc_api_lib.ini - this is required for vnc_api to connect to keystone config = ConfigParser.ConfigParser() config.optionxform = str vnc_api_ini = '/etc/contrail/vnc_api_lib.ini' if os.path.exists(vnc_api_ini): config.read(vnc_api_ini) if 'auth' not in config.sections(): config.add_section('auth') config.set('auth','AUTHN_TYPE', 'keystone') config.set('auth','AUTHN_PROTOCOL', auth_protocol) config.set('auth','AUTHN_SERVER', auth_server_ip) config.set('auth','AUTHN_PORT', auth_server_port) if keystone_version == 'v3': config.set('auth','AUTHN_URL', '/v3/auth/tokens') else: config.set('auth','AUTHN_URL', '/v2.0/tokens') if api_auth_protocol == 'https': if 'global' not in config.sections(): config.add_section('global') config.set('global','certfile', api_certfile) config.set('global','cafile', api_cafile) config.set('global','keyfile', api_keyfile) config.set('global','insecure',api_insecure_flag) if auth_protocol == 'https': if 'auth' not in config.sections(): config.add_section('auth') config.set('auth','certfile', keystone_certfile) config.set('auth','cafile', keystone_cafile) config.set('auth','keyfile', keystone_keyfile) config.set('auth','insecure', keystone_insecure_flag) with open(vnc_api_ini,'w') as f: config.write(f) # Get kube config file to the testrunner node if orch == 'kubernetes' or slave_orch == 'kubernetes': if not os.path.exists(kube_config_file): dir_name = os.path.dirname(kube_config_file) if not os.path.exists(dir_name): os.makedirs(dir_name) with settings(host_string = env.kubernetes['master']): if deployer == 'openshift' : get(openshift_src_config_file, kube_config_file) else: get(kube_config_file, kube_config_file) # If webui = True, in testbed, setup webui for sanity if webui: sku = get_build_sku(cfgm_host) update_config_option('openstack', '/etc/keystone/keystone.conf', 'token', 
'expiration', '86400','keystone', sku) update_js_config('webui', '/etc/contrail/config.global.js', 'contrail-webui', container=is_container_env)
def get_real_hostname(host_string):
    """Return the hostname reported by the remote node itself.

    Runs ``hostname`` (via sudo) on the node addressed by *host_string*
    and returns the command output.

    :param host_string: Fabric host string (``user@ip``) of the target node.
    :return: the remote node's hostname as a string.
    """
    with settings(host_string=host_string):
        # NOTE(review): the original also resolved the node's control-plane IP
        # here (hstr_to_ip(get_control_host_string(...))) but never used the
        # result; that dead lookup has been removed.
        tgt_hostname = sudo("hostname")
    return tgt_hostname
def fixup_restart_haproxy_in_openstack_node(*args):
    """Regenerate the OpenStack section of haproxy.cfg on each node in *args*
    and restart haproxy there.

    Builds one backend ``server`` line per node in env.roledefs['openstack']
    for every load-balanced OpenStack service, substitutes them into the
    openstack_haproxy template, splices the result into each target node's
    /etc/haproxy/haproxy.cfg (replacing the old contrail-openstack marker
    section), then enables/restarts haproxy and moves keystone to the
    non-conflicting ports 6000/35358 so haproxy can own 5000/35357.
    """
    # Accumulated backend server lines, one entry per openstack node, that get
    # interpolated into the haproxy config template below.
    keystone_server_lines = ''
    keystone_admin_server_lines = ''
    glance_server_lines = ''
    cinder_server_lines = ''
    ceph_restapi_server_lines = ''
    nova_api_server_lines = ''
    nova_meta_server_lines = ''
    nova_vnc_server_lines = ''
    memcached_server_lines = ''
    rabbitmq_server_lines = ''
    mysql_server_lines = ''
    # Indentation used inside the generated haproxy.cfg stanzas.
    space = ' ' * 3
    for host_string in env.roledefs['openstack']:
        # 1-based index of this node within the openstack role; used for
        # backend naming and master/backup weighting below.
        server_index = env.roledefs['openstack'].index(host_string) + 1
        mgmt_host_ip = hstr_to_ip(host_string)
        host_ip = hstr_to_ip(get_control_host_string(host_string))
        # Services listen on shifted ports (e.g. keystone 6000/35358) so that
        # haproxy can bind the well-known ports on the VIP.
        keystone_server_lines +=\
            '%s server %s %s:6000 check inter 2000 rise 2 fall 1\n'\
            % (space, host_ip, host_ip)
        keystone_admin_server_lines +=\
            '%s server %s %s:35358 check inter 2000 rise 2 fall 1\n'\
            % (space, host_ip, host_ip)
        glance_server_lines +=\
            '%s server %s %s:9393 check inter 2000 rise 2 fall 1\n'\
            % (space, host_ip, host_ip)
        cinder_server_lines +=\
            '%s server %s %s:9776 check inter 2000 rise 2 fall 3\n'\
            % (space, host_ip, host_ip)
        ceph_restapi_server_lines +=\
            '%s server %s %s:5006 check inter 2000 rise 2 fall 3\n'\
            % (space, host_ip, host_ip)
        nova_api_server_lines +=\
            '%s server %s %s:9774 check inter 2000 rise 2 fall 1\n'\
            % (space, host_ip, host_ip)
        nova_meta_server_lines +=\
            '%s server %s %s:9775 check inter 2000 rise 2 fall 1\n'\
            % (space, host_ip, host_ip)
        # VNC proxy is reached over the management network, hence mgmt IP.
        nova_vnc_server_lines +=\
            '%s server %s %s:6999 check inter 2000 rise 2 fall 3\n'\
            % (space, mgmt_host_ip, mgmt_host_ip)
        # Only the first two nodes are used as memcached backends.
        if server_index <= 2:
            memcached_server_lines +=\
                '%s server repcache%s %s:11211 check inter 2000 rise 2 fall 3\n'\
                % (space, server_index, host_ip)
        # First node is the active rabbitmq/mysql backend (weight 200); the
        # rest are lower-weight backups.
        if server_index == 1:
            rabbitmq_server_lines +=\
                '%s server rabbit%s %s:5672 weight 200 check inter 2000 rise 2 fall 3\n'\
                % (space, server_index, host_ip)
        else:
            rabbitmq_server_lines +=\
                '%s server rabbit%s %s:5672 weight 100 check inter 2000 rise 2 fall 3 backup\n'\
                % (space, server_index, host_ip)
        if server_index == 1:
            mysql_server_lines +=\
                '%s server mysql%s %s:3306 weight 200 check inter 2000 rise 2 fall 3\n'\
                % (space, server_index, host_ip)
        else:
            mysql_server_lines +=\
                '%s server mysql%s %s:3306 weight 100 check inter 2000 rise 2 fall 3 backup\n'\
                % (space, server_index, host_ip)

    # NOTE(review): this loop re-computes the same haproxy_config once per
    # openstack node (none of the substitution inputs depend on host_string);
    # only the last assignment is used below — looks redundant, confirm before
    # simplifying.
    for host_string in env.roledefs['openstack']:
        haproxy_config = openstack_haproxy.template.safe_substitute({
            '__keystone_backend_servers__': keystone_server_lines,
            '__keystone_admin_backend_servers__': keystone_admin_server_lines,
            '__glance_backend_servers__': glance_server_lines,
            '__cinder_backend_servers__': cinder_server_lines,
            '__ceph_restapi_backend_servers__': ceph_restapi_server_lines,
            '__nova_api_backend_servers__': nova_api_server_lines,
            '__nova_meta_backend_servers__': nova_meta_server_lines,
            '__nova_vnc_backend_servers__': nova_vnc_server_lines,
            '__memcached_servers__': memcached_server_lines,
            '__rabbitmq_servers__': rabbitmq_server_lines,
            '__mysql_servers__': mysql_server_lines,
            '__contrail_hap_user__': 'haproxy',
            '__contrail_hap_passwd__': 'contrail123',
        })

    for host_string in args:
        with settings(host_string=host_string):
            # chop old settings including pesky default from pkg...
            # Pull the node's current haproxy.cfg locally, rewrite it with
            # sed, then push it back.
            tmp_fname = "/tmp/haproxy-%s-config" % (host_string)
            get_as_sudo("/etc/haproxy/haproxy.cfg", tmp_fname)
            with settings(warn_only=True):
                # Drop any previously generated contrail-openstack section.
                local(
                    "sed -i -e '/^#contrail-openstack-marker-start/,/^#contrail-openstack-marker-end/d' %s"
                    % (tmp_fname))
                # Move the stock frontend off port 5000 so haproxy can proxy
                # keystone's public port there instead.
                local(
                    "sed -i -e 's/frontend\s*main\s*\*:5000/frontend main *:5001/' %s"
                    % (tmp_fname))
                local("sed -i -e 's/*:5000/*:5001/' %s" % (tmp_fname))
                local(
                    "sed -i -e 's/ssl-relay 0.0.0.0:8443/ssl-relay 0.0.0.0:5002/' %s"
                    % (tmp_fname))
                local(
                    "sed -i -e 's/option\shttplog/option tcplog/' %s"
                    % (tmp_fname))
                # Raise connection limits and global tuning for HA load.
                local("sed -i -e 's/maxconn 4096/maxconn 100000/' %s" % (tmp_fname))
                local('sed -i "/^global/a\\ tune.bufsize 16384" %s' % tmp_fname)
                local('sed -i "/^global/a\\ tune.maxrewrite 1024" %s' % tmp_fname)
                local('sed -i "/^global/a\ spread-checks 4" %s' % tmp_fname)
                # Remove default HA config
                local("sed -i '/listen\sappli1-rewrite/,/rspidel/d' %s" % tmp_fname)
                local("sed -i '/listen\sappli3-relais/,/rspidel/d' %s" % tmp_fname)
            # ...generate new ones
            cfg_file = open(tmp_fname, 'a')
            cfg_file.write(haproxy_config)
            cfg_file.close()
            put(tmp_fname, "/etc/haproxy/haproxy.cfg", use_sudo=True)
            local("rm %s" % (tmp_fname))

        # haproxy enable
        with settings(host_string=host_string, warn_only=True):
            sudo("chkconfig haproxy on")
            sudo("service supervisor-openstack stop")
            enable_haproxy()
            sudo("service haproxy restart")
            #Change the keystone admin/public port
            # keystone moves to 6000/35358 so haproxy owns the VIP-facing
            # 5000/35357 (matches the backend lines generated above).
            sudo(
                "openstack-config --set /etc/keystone/keystone.conf DEFAULT public_port 6000"
            )
            sudo(
                "openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_port 35358"
            )
def configure_test_env(contrail_fab_path='/opt/contrail/utils', test_dir='/contrail-test'): """ Configure test environment by creating sanity_params.ini and sanity_testbed.json files """ print "Configuring test environment" sys.path.insert(0, contrail_fab_path) from fabfile.testbeds import testbed from fabfile.utils.host import get_openstack_internal_vip, \ get_control_host_string, get_authserver_ip, get_admin_tenant_name, \ get_authserver_port, get_env_passwords, get_authserver_credentials, \ get_vcenter_ip, get_vcenter_port, get_vcenter_username, \ get_vcenter_password, get_vcenter_datacenter, get_vcenter_compute, \ get_authserver_protocol, get_region_name, get_contrail_internal_vip, \ get_openstack_external_vip, get_contrail_external_vip from fabfile.utils.multitenancy import get_mt_enable from fabfile.utils.interface import get_data_ip from fabfile.tasks.install import update_config_option, update_js_config cfgm_host = env.roledefs['cfgm'][0] auth_protocol = get_authserver_protocol() auth_server_ip = get_authserver_ip() auth_server_port = get_authserver_port() with settings(warn_only=True), hide('everything'): with lcd(contrail_fab_path): if local('git branch').succeeded: fab_revision = local('git log --format="%H" -n 1', capture=True) else: with settings(host_string=cfgm_host), hide('everything'): fab_revision = run( 'cat /opt/contrail/contrail_packages/VERSION') with lcd(test_dir): if local('git branch').succeeded: revision = local('git log --format="%H" -n 1', capture=True) else: with settings(host_string=cfgm_host), hide('everything'): revision = run( 'cat /opt/contrail/contrail_packages/VERSION') sanity_testbed_dict = { 'hosts': [], 'vgw': [], 'esxi_vms': [], 'vcenter_servers': [], 'hosts_ipmi': [], 'tor': [], } sample_ini_file = test_dir + '/' + 'sanity_params.ini.sample' with open(sample_ini_file, 'r') as fd_sample_ini: contents_sample_ini = fd_sample_ini.read() sanity_ini_templ = string.Template(contents_sample_ini) if env.get('orchestrator', 
'openstack') != 'vcenter': with settings( host_string=env.roledefs['openstack'][0]), hide('everything'): openstack_host_name = run("hostname") with settings(host_string=env.roledefs['cfgm'][0]), hide('everything'): cfgm_host_name = run("hostname") control_host_names = [] for control_host in env.roledefs['control']: with settings(host_string=control_host), hide('everything'): host_name = run("hostname") control_host_names.append(host_name) cassandra_host_names = [] if 'database' in env.roledefs.keys(): for cassandra_host in env.roledefs['database']: with settings(host_string=cassandra_host), hide('everything'): host_name = run("hostname") cassandra_host_names.append(host_name) internal_vip = get_openstack_internal_vip() external_vip = get_openstack_external_vip() contrail_internal_vip = get_contrail_internal_vip() contrail_external_vip = get_contrail_external_vip() multi_role_test = False for host_string in env.roledefs['all']: if host_string in env.roledefs.get('test', []): for role in env.roledefs.iterkeys(): if role in ['test', 'all']: continue if host_string in env.roledefs.get(role, []): multi_role_test = True break if not multi_role_test: continue host_ip = host_string.split('@')[1] with settings(host_string=host_string), hide('everything'): host_name = run("hostname") host_dict = {} host_dict['ip'] = host_ip host_dict['data-ip'] = get_data_ip(host_string)[0] if host_dict['data-ip'] == host_string.split('@')[1]: host_dict['data-ip'] = get_data_ip(host_string)[0] host_dict['control-ip'] = get_control_host_string(host_string).split( '@')[1] host_dict['name'] = host_name host_dict['username'] = host_string.split('@')[0] host_dict['password'] = get_env_passwords(host_string) host_dict['roles'] = [] if host_string in env.roledefs['openstack']: role_dict = { 'type': 'openstack', 'params': { 'cfgm': cfgm_host_name } } host_dict['roles'].append(role_dict) if host_string in env.roledefs['cfgm']: role_dict = { 'type': 'cfgm', 'params': { 'collector': host_name, 
'cassandra': ' '.join(cassandra_host_names)}}
        # NOTE(review): this excerpt is the tail of a larger setup function --
        # the enclosing def, the per-host loop header, and the opening of the
        # role_dict literal above are outside this chunk.
        if env.get('orchestrator', 'openstack') != 'vcenter':
            role_dict['openstack'] = openstack_host_name
        host_dict['roles'].append(role_dict)

        # bgp role for control nodes.
        if host_string in env.roledefs['control']:
            role_dict = {'type': 'bgp',
                         'params': {'collector': cfgm_host_name,
                                    'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)

        # database role ('database' may be absent from older testbeds, hence
        # the membership check on roledefs first).
        if 'database' in env.roledefs.keys() and host_string in env.roledefs['database']:
            role_dict = {'type': 'database',
                         'params': {'cassandra': ' '.join(cassandra_host_names)}}
            host_dict['roles'].append(role_dict)

        # compute role: each compute node peers (bgp) with every control node.
        if host_string in env.roledefs['compute']:
            role_dict = {'type': 'compute',
                         'params': {'collector': cfgm_host_name,
                                    'cfgm': cfgm_host_name}}
            role_dict['params']['bgp'] = []
            if len(env.roledefs['control']) == 1:
                role_dict['params']['bgp'] = control_host_names
            else:
                for control_node in control_host_names:
                    role_dict['params']['bgp'].append(control_node)
                # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))])
            host_dict['roles'].append(role_dict)

        # collector role.
        if 'collector' in env.roledefs.keys() and host_string in env.roledefs['collector']:
            role_dict = {'type': 'collector',
                         'params': {'cassandra': ' '.join(cassandra_host_names)}}
            host_dict['roles'].append(role_dict)

        # webui role.
        if 'webui' in env.roledefs.keys() and host_string in env.roledefs['webui']:
            role_dict = {'type': 'webui', 'params': {'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)

        sanity_testbed_dict['hosts'].append(host_dict)

    # Optional testbed sections copied verbatim into the sanity testbed dict.
    if env.has_key('vgw'):
        sanity_testbed_dict['vgw'].append(env.vgw)

    # Read ToR config
    sanity_tor_dict = {}
    if env.has_key('tor_agent'):
        sanity_testbed_dict['tor_agent'] = env.tor_agent

    # Read any tor-host config
    if env.has_key('tor_hosts'):
        sanity_testbed_dict['tor_hosts'] = env.tor_hosts

    if env.has_key('xmpp_auth_enable'):
        sanity_testbed_dict['xmpp_auth_enable'] = env.xmpp_auth_enable
    if env.has_key('xmpp_dns_auth_enable'):
        sanity_testbed_dict['xmpp_dns_auth_enable'] = env.xmpp_dns_auth_enable

    # Read any MX config (as physical_router)
    if env.has_key('physical_routers'):
        sanity_testbed_dict['physical_routers'] = env.physical_routers

    # ESXi hypervisors: each is recorded both as a host and as an esxi_vm.
    esxi_hosts = getattr(testbed, 'esxi_hosts', None)
    if esxi_hosts:
        for esxi in esxi_hosts:
            host_dict = {}
            host_dict['ip'] = esxi_hosts[esxi]['ip']
            host_dict['data-ip'] = host_dict['ip']
            host_dict['control-ip'] = host_dict['ip']
            host_dict['name'] = esxi
            host_dict['username'] = esxi_hosts[esxi]['username']
            host_dict['password'] = esxi_hosts[esxi]['password']
            # It's used for vcenter-only mode provisioning for contrail-vm.
            # It's not needed for vcenter_gateway mode, hence might not be
            # there in testbed.py.
            if 'contrail_vm' in esxi_hosts[esxi]:
                host_dict['contrail_vm'] = esxi_hosts[esxi]['contrail_vm']['host']
            host_dict['roles'] = []
            sanity_testbed_dict['hosts'].append(host_dict)
            sanity_testbed_dict['esxi_vms'].append(host_dict)

    # vCenter server connection details.
    vcenter_servers = env.get('vcenter_servers')
    if vcenter_servers:
        for vcenter in vcenter_servers:
            host_dict = {}
            host_dict['server'] = vcenter_servers[vcenter]['server']
            host_dict['port'] = vcenter_servers[vcenter]['port']
            host_dict['username'] = vcenter_servers[vcenter]['username']
            host_dict['password'] = vcenter_servers[vcenter]['password']
            host_dict['datacenter'] = vcenter_servers[vcenter]['datacenter']
            host_dict['auth'] = vcenter_servers[vcenter]['auth']
            host_dict['cluster'] = vcenter_servers[vcenter]['cluster']
            host_dict['dv_switch'] = vcenter_servers[vcenter]['dv_switch']['dv_switch_name']
            # Mostly we do not use the below info for vcenter sanity tests.
            # It's used for vcenter-only mode provisioning for contrail-vm.
            # It's not needed for vcenter_gateway mode, hence might not be
            # there in testbed.py.
            if 'dv_port_group' in vcenter_servers[vcenter]:
                host_dict['dv_port_group'] = vcenter_servers[vcenter]['dv_port_group']['dv_portgroup_name']
            sanity_testbed_dict['vcenter_servers'].append(host_dict)

    # get other orchestrators (vcenter etc) info if any
    slave_orch = None
    if env.has_key('other_orchestrators'):
        sanity_testbed_dict['other_orchestrators'] = env.other_orchestrators
        for k, v in env.other_orchestrators.items():
            if v['type'] == 'vcenter':
                slave_orch = 'vcenter'

    # get host ipmi list
    # NOTE(review): this appends the entire hosts_ipmi value as a single
    # element of the list -- confirm downstream consumers expect nesting.
    if env.has_key('hosts_ipmi'):
        sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi)

    if not getattr(env, 'test', None):
        env.test = {}

    # generate json file and copy to cfgm
    sanity_testbed_json = json.dumps(sanity_testbed_dict)

    # Test-framework parameters: env.test wins, then the environment
    # variable, then the hard-coded fallback.
    stack_user = env.test.get('stack_user', os.getenv('STACK_USER') or '')
    stack_password = env.test.get('stack_password',
                                  os.getenv('STACK_PASSWORD') or '')
    stack_tenant = env.test.get('stack_tenant', os.getenv('STACK_TENANT') or '')
    tenant_isolation = env.test.get('tenant_isolation',
                                    os.getenv('TENANT_ISOLATION') or '')
    stop_on_fail = env.get('stop_on_fail', False)
    mail_to = env.test.get('mail_to', os.getenv('MAIL_TO') or '')
    log_scenario = env.get('log_scenario', 'Sanity')
    stack_region_name = get_region_name()
    admin_user, admin_password = get_authserver_credentials()
    admin_tenant = get_admin_tenant_name()
    # Few hardcoded variables for sanity environment
    # can be removed once we move to python3 and configparser
    stack_domain = env.get('stack_domain', 'default-domain')
    webserver_host = env.test.get('webserver_host',
                                  os.getenv('WEBSERVER_HOST') or '')
    webserver_user = env.test.get('webserver_user',
                                  os.getenv('WEBSERVER_USER') or '')
    webserver_password = env.test.get('webserver_password',
                                      os.getenv('WEBSERVER_PASSWORD') or '')
    webserver_log_path = env.test.get(
        'webserver_log_path',
        os.getenv('WEBSERVER_LOG_PATH') or '/var/www/contrail-test-ci/logs/')
    webserver_report_path = env.test.get(
        'webserver_report_path',
        os.getenv('WEBSERVER_REPORT_PATH') or
        '/var/www/contrail-test-ci/reports/')
    webroot = env.test.get('webroot', os.getenv('WEBROOT') or 'contrail-test-ci')
    mail_server = env.test.get('mail_server', os.getenv('MAIL_SERVER') or '')
    mail_port = env.test.get('mail_port', os.getenv('MAIL_PORT') or '25')
    fip_pool_name = env.test.get(
        'fip_pool_name', os.getenv('FIP_POOL_NAME') or 'floating-ip-pool')
    public_virtual_network = env.test.get(
        'public_virtual_network',
        os.getenv('PUBLIC_VIRTUAL_NETWORK') or 'public')
    public_tenant_name = env.test.get(
        'public_tenant_name', os.getenv('PUBLIC_TENANT_NAME') or 'admin')
    fixture_cleanup = env.test.get('fixture_cleanup',
                                   os.getenv('FIXTURE_CLEANUP') or 'yes')
    generate_html_report = env.test.get(
        'generate_html_report', os.getenv('GENERATE_HTML_REPORT') or 'True')
    keypair_name = env.test.get('keypair_name',
                                os.getenv('KEYPAIR_NAME') or 'contrail_key')
    mail_sender = env.test.get(
        'mail_sender', os.getenv('MAIL_SENDER') or '*****@*****.**')
    discovery_ip = env.test.get('discovery_ip', os.getenv('DISCOVERY_IP') or '')
    config_api_ip = env.test.get('config_api_ip',
                                 os.getenv('CONFIG_API_IP') or '')
    analytics_api_ip = env.test.get('analytics_api_ip',
                                    os.getenv('ANALYTICS_API_IP') or '')
    discovery_port = env.test.get('discovery_port',
                                  os.getenv('DISCOVERY_PORT') or '')
    config_api_port = env.test.get('config_api_port',
                                   os.getenv('CONFIG_API_PORT') or '')
    analytics_api_port = env.test.get('analytics_api_port',
                                      os.getenv('ANALYTICS_API_PORT') or '')
    control_port = env.test.get('control_port', os.getenv('CONTROL_PORT') or '')
    dns_port = env.test.get('dns_port', os.getenv('DNS_PORT') or '')
    agent_port = env.test.get('agent_port', os.getenv('AGENT_PORT') or '')
    # NOTE(review): bool(os.getenv('USER_ISOLATION') or True) is always True,
    # so the env var can never disable isolation here; kept as-is.
    user_isolation = env.test.get('user_isolation',
                                  bool(os.getenv('USER_ISOLATION') or True))
    use_devicemanager_for_md5 = getattr(testbed, 'use_devicemanager_for_md5',
                                        False)
    orch = getattr(env, 'orchestrator', 'openstack')
    router_asn = getattr(testbed, 'router_asn', '')
    public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '')
    public_vn_subnet = getattr(testbed, 'public_vn_subnet', '')
    ext_routers = getattr(testbed, 'ext_routers', '')
    router_info = str(ext_routers)
    test_verify_on_setup = getattr(env, 'test_verify_on_setup', True)
    webui = getattr(testbed, 'webui', False)
    horizon = getattr(testbed, 'horizon', False)
    ui_config = getattr(testbed, 'ui_config', False)
    ui_browser = getattr(testbed, 'ui_browser', False)

    # AMQP endpoints: start from env.openstack, then let env.cfgm override.
    if not env.has_key('openstack'):
        env.openstack = {}
    if not env.has_key('cfgm'):
        env.cfgm = {}
    config_amqp_ip = env.openstack.get('amqp_host', '')
    if config_amqp_ip:
        config_amqp_ips = [config_amqp_ip]
    else:
        config_amqp_ips = []
    # If amqp details are in env.cfgm as well, use that
    config_amqp_port = env.cfgm.get('amqp_port', '5672')
    config_amqp_ips = env.cfgm.get('amqp_hosts', config_amqp_ips)
    key_filename = env.get('key_filename', '')
    pubkey_filename = env.get('pubkey_filename', '')

    # When vCenter is the orchestrator (or a slave orchestrator), pick up its
    # datacenter name and force the vCenter public tenant.
    vcenter_dc = ''
    if orch == 'vcenter' or slave_orch == 'vcenter':
        public_tenant_name = 'vCenter'
        if env.has_key('vcenter_servers'):
            if env.vcenter_servers:
                for k in env.vcenter_servers:
                    vcenter_dc = env.vcenter_servers[k]['datacenter']

    # Fill the sanity_params.ini template with everything gathered above.
    sanity_params = sanity_ini_templ.safe_substitute({
        '__testbed_json_file__': 'sanity_testbed.json',
        '__nova_keypair_name__': keypair_name,
        '__orch__': orch,
        '__admin_user__': admin_user,
        '__admin_password__': admin_password,
        '__admin_tenant__': admin_tenant,
        '__tenant_isolation__': tenant_isolation,
        '__stack_user__': stack_user,
        '__stack_password__': stack_password,
        '__auth_ip__': auth_server_ip,
        '__auth_port__': auth_server_port,
        '__auth_protocol__': auth_protocol,
        '__stack_region_name__': stack_region_name,
        '__stack_tenant__': stack_tenant,
        '__stack_domain__': stack_domain,
        '__multi_tenancy__': get_mt_enable(),
        '__address_family__': get_address_family(),
        '__log_scenario__': log_scenario,
        '__generate_html_report__': generate_html_report,
        '__fixture_cleanup__': fixture_cleanup,
        '__key_filename__': key_filename,
        '__pubkey_filename__': pubkey_filename,
        '__webserver__': webserver_host,
        '__webserver_user__': webserver_user,
        '__webserver_password__': webserver_password,
        '__webserver_log_dir__': webserver_log_path,
        '__webserver_report_dir__': webserver_report_path,
        '__webroot__': webroot,
        '__mail_server__': mail_server,
        '__mail_port__': mail_port,
        '__sender_mail_id__': mail_sender,
        '__receiver_mail_id__': mail_to,
        '__http_proxy__': env.get('http_proxy', ''),
        '__ui_browser__': ui_browser,
        '__ui_config__': ui_config,
        '__horizon__': horizon,
        '__webui__': webui,
        '__devstack__': False,
        '__public_vn_rtgt__': public_vn_rtgt,
        '__router_asn__': router_asn,
        '__router_name_ip_tuples__': router_info,
        '__public_vn_name__': fip_pool_name,
        '__public_virtual_network__': public_virtual_network,
        '__public_tenant_name__': public_tenant_name,
        '__public_vn_subnet__': public_vn_subnet,
        '__test_revision__': revision,
        '__fab_revision__': fab_revision,
        '__test_verify_on_setup__': test_verify_on_setup,
        '__stop_on_fail__': stop_on_fail,
        '__ha_setup__': getattr(testbed, 'ha_setup', ''),
        '__ipmi_username__': getattr(testbed, 'ipmi_username', ''),
        '__ipmi_password__': getattr(testbed, 'ipmi_password', ''),
        '__contrail_internal_vip__': contrail_internal_vip,
        '__contrail_external_vip__': contrail_external_vip,
        '__internal_vip__': internal_vip,
        '__external_vip__': external_vip,
        '__vcenter_dc__': vcenter_dc,
        '__vcenter_server__': get_vcenter_ip(),
        '__vcenter_port__': get_vcenter_port(),
        '__vcenter_username__': get_vcenter_username(),
        '__vcenter_password__': get_vcenter_password(),
        '__vcenter_datacenter__': get_vcenter_datacenter(),
        '__vcenter_compute__': get_vcenter_compute(),
        '__use_devicemanager_for_md5__': use_devicemanager_for_md5,
        '__discovery_port__': discovery_port,
        '__config_api_port__': config_api_port,
        '__analytics_api_port__': analytics_api_port,
        '__control_port__': control_port,
        '__dns_port__': dns_port,
        '__vrouter_agent_port__': agent_port,
        '__discovery_ip__': discovery_ip,
        '__config_api_ip__': config_api_ip,
        '__analytics_api_ip__': analytics_api_ip,
        '__user_isolation__': user_isolation,
        '__config_amqp_ips__': ','.join(config_amqp_ips),
        '__config_amqp_port__': config_amqp_port,
    })

    # Write the generated ini and json into the test directory.
    ini_file = test_dir + '/' + 'sanity_params.ini'
    testbed_json_file = test_dir + '/' + 'sanity_testbed.json'
    with open(ini_file, 'w') as ini:
        ini.write(sanity_params)
    with open(testbed_json_file, 'w') as tb:
        tb.write(sanity_testbed_json)

    # Create /etc/contrail/openstackrc
    if not os.path.exists('/etc/contrail'):
        os.makedirs('/etc/contrail')
    with open('/etc/contrail/openstackrc', 'w') as rc:
        rc.write("export OS_USERNAME=%s\n" % admin_user)
        rc.write("export OS_PASSWORD=%s\n" % admin_password)
        rc.write("export OS_TENANT_NAME=%s\n" % admin_tenant)
        rc.write("export OS_REGION_NAME=%s\n" % stack_region_name)
        rc.write("export OS_AUTH_URL=%s://%s:%s/v2.0\n" % (auth_protocol,
                                                           auth_server_ip,
                                                           auth_server_port))
        rc.write("export OS_NO_CACHE=1\n")

    # Write vnc_api_lib.ini - this is required for vnc_api to connect to keystone
    config = ConfigParser.ConfigParser()
    # keep option names case-sensitive (ConfigParser lowercases by default)
    config.optionxform = str
    vnc_api_ini = '/etc/contrail/vnc_api_lib.ini'
    if os.path.exists(vnc_api_ini):
        config.read(vnc_api_ini)
    if 'auth' not in config.sections():
        config.add_section('auth')
    config.set('auth', 'AUTHN_TYPE', 'keystone')
    config.set('auth', 'AUTHN_PROTOCOL', auth_protocol)
    config.set('auth', 'AUTHN_SERVER', auth_server_ip)
    config.set('auth', 'AUTHN_PORT', auth_server_port)
    config.set('auth', 'AUTHN_URL', '/v2.0/tokens')
    with open(vnc_api_ini, 'w') as f:
        config.write(f)

    # If webui = True, in testbed, setup webui for sanity
    if webui:
        update_config_option('openstack', '/etc/keystone/keystone.conf',
                             'token', 'expiration', '86400', 'keystone')
        update_js_config('openstack', '/etc/contrail/config.global.js',
                         'contrail-webui')