def setup_keystone_ssl_certs_node(*nodes):
    default_certfile = '/etc/keystone/ssl/certs/keystone.pem'
    default_keyfile = '/etc/keystone/ssl/private/keystone.key'
    default_cafile = '/etc/keystone/ssl/certs/keystone_ca.pem'
    keystonecertbundle = get_keystone_cert_bundle()
    ssl_certs = ((get_keystone_certfile(), default_certfile),
                 (get_keystone_keyfile(), default_keyfile),
                 (get_keystone_cafile(), default_cafile))
    index = env.roledefs['openstack'].index(env.host_string) + 1
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    # Clear old certificate
                    sudo('rm -f %s' % ssl_cert)
                    sudo('rm -f %s' % keystonecertbundle)
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    openstack_host = env.roledefs['openstack'][0]
                    if index == 1:
                        if not exists(ssl_cert, use_sudo=True):
                            print "Creating keystone SSL certs in first openstack node"
                            subject_alt_names_mgmt = [hstr_to_ip(host)
                                                      for host in env.roledefs['openstack']]
                            subject_alt_names_ctrl = [hstr_to_ip(get_control_host_string(host))
                                                      for host in env.roledefs['openstack']]
                            subject_alt_names = subject_alt_names_mgmt + subject_alt_names_ctrl
                            if get_openstack_external_vip():
                                subject_alt_names.append(get_openstack_external_vip())
                            sudo('create-keystone-ssl-certs.sh %s %s' % (
                                get_openstack_internal_vip() or
                                hstr_to_ip(get_control_host_string(openstack_host)),
                                ','.join(subject_alt_names)))
                    else:
                        with settings(host_string=openstack_host,
                                      password=get_env_passwords(openstack_host)):
                            while not exists(ssl_cert, use_sudo=True):
                                print "Wait for SSL certs to be created in first openstack"
                                sleep(0.1)
                            print "Get SSL cert(%s) from first openstack" % ssl_cert
                            tmp_dir = tempfile.mkdtemp()
                            tmp_fname = os.path.join(tmp_dir, os.path.basename(ssl_cert))
                            get_as_sudo(ssl_cert, tmp_fname)
                        print "Copy to this(%s) openstack node" % env.host_string
                        sudo('mkdir -p /etc/keystone/ssl/certs/')
                        sudo('mkdir -p /etc/keystone/ssl/private/')
                        put(tmp_fname, ssl_cert, use_sudo=True)
                        os.remove(tmp_fname)
                elif os.path.isfile(ssl_cert):
                    print "Certificate (%s) exists locally" % ssl_cert
                    put(ssl_cert, default, use_sudo=True)
                elif exists(ssl_cert, use_sudo=True):
                    print "Certificate (%s) exists in openstack node" % ssl_cert
                else:
                    raise RuntimeError("%s doesn't exist locally or in the openstack node"
                                       % ssl_cert)
            if not exists(keystonecertbundle, use_sudo=True):
                ((certfile, _), (keyfile, _), (cafile, _)) = ssl_certs
                sudo('cat %s %s > %s' % (certfile, cafile, keystonecertbundle))
            sudo("chown -R keystone:keystone /etc/keystone/ssl")
def copy_apiserver_ssl_certs_to_node(*nodes):
    ssl_certs = (get_apiserver_certfile(), get_apiserver_cafile(),
                 get_apiserver_keyfile(), get_apiserver_cert_bundle())
    cfgm_host = env.roledefs['cfgm'][0]
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert in ssl_certs:
                cert_file = '/etc/contrail/ssl/certs/%s' % os.path.basename(ssl_cert)
                if ssl_cert.endswith('.key'):
                    cert_file = '/etc/contrail/ssl/private/%s' % os.path.basename(ssl_cert)
                if node not in env.roledefs['cfgm']:
                    # Clear old certificate
                    sudo('rm -f %s' % cert_file)
                if exists(cert_file, use_sudo=True):
                    continue
                with settings(host_string=cfgm_host,
                              password=get_env_passwords(cfgm_host)):
                    tmp_fname = os.path.join('/tmp', os.path.basename(ssl_cert))
                    get_as_sudo(ssl_cert, tmp_fname)
                sudo("mkdir -p /etc/contrail/ssl/certs/")
                sudo("mkdir -p /etc/contrail/ssl/private/")
                put(tmp_fname, cert_file, use_sudo=True)
                os.remove(tmp_fname)
            with settings(warn_only=True):
                sudo("chown -R contrail:contrail /etc/contrail/ssl")
def copy_apiserver_ssl_certs_to_node(*nodes):
    ssl_certs = (get_apiserver_certfile(), get_apiserver_cafile(),
                 get_apiserver_keyfile(), get_apiserver_cert_bundle())
    cfgm_host = env.roledefs['cfgm'][0]
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert in ssl_certs:
                cert_file = '/etc/contrail/ssl/certs/%s' % os.path.basename(ssl_cert)
                if ssl_cert.endswith('.key'):
                    cert_file = '/etc/contrail/ssl/private/%s' % os.path.basename(ssl_cert)
                if node not in env.roledefs['cfgm']:
                    # Clear old certificate
                    sudo('rm -f %s' % cert_file)
                if exists(cert_file, use_sudo=True):
                    continue
                with settings(host_string=cfgm_host,
                              password=get_env_passwords(cfgm_host)):
                    tmp_dir = tempfile.mkdtemp()
                    tmp_fname = os.path.join(tmp_dir, os.path.basename(ssl_cert))
                    get_as_sudo(ssl_cert, tmp_fname)
                sudo("mkdir -p /etc/contrail/ssl/certs/")
                sudo("mkdir -p /etc/contrail/ssl/private/")
                put(tmp_fname, cert_file, use_sudo=True)
                os.remove(tmp_fname)
            with settings(warn_only=True):
                sudo("chown -R contrail:contrail /etc/contrail/ssl")
def bootstrap_galera_cluster():
    openstack_node = env.roledefs['openstack'][0]
    with settings(host_string=openstack_node,
                  password=get_env_passwords(openstack_node)):
        sudo("service mysql start --wsrep_cluster_address=gcomm://")
    for openstack_node in env.roledefs['openstack'][1:]:
        with settings(host_string=openstack_node,
                      password=get_env_passwords(openstack_node)):
            sudo("service mysql restart")
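# Note (added for context): starting mysqld with an empty cluster address
# (--wsrep_cluster_address=gcomm://) is what bootstraps a brand-new Galera
# cluster on the first openstack node; the remaining nodes then join that
# cluster when their mysql service is restarted with the regular gcomm://
# member list in wsrep.cnf.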
def sync_keystone_ssl_certs():
    host_string = env.host_string
    temp_dir = tempfile.mkdtemp()
    with settings(host_string=env.roledefs['openstack'][0],
                  password=get_env_passwords(env.roledefs['openstack'][0])):
        get_as_sudo('/etc/keystone/ssl/', temp_dir)
    with settings(host_string=host_string,
                  password=get_env_passwords(host_string)):
        put('%s/ssl/' % temp_dir, '/etc/keystone/', use_sudo=True)
        sudo('service keystone restart')
def sync_keystone_ssl_certs_node(*args):
    for host_string in args:
        temp_dir = tempfile.mkdtemp()
        with settings(host_string=env.roledefs['openstack'][0],
                      password=get_env_passwords(env.roledefs['openstack'][0])):
            get_as_sudo('/etc/keystone/ssl/', temp_dir)
        with settings(host_string=host_string,
                      password=get_env_passwords(host_string)):
            put('%s/ssl/' % temp_dir, '/etc/keystone/', use_sudo=True)
            sudo('service keystone restart')
def copy_vnc_api_lib_ini_to_node(*nodes):
    vnc_api_lib = '/etc/contrail/vnc_api_lib.ini'
    cfgm_host = env.roledefs['cfgm'][0]
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            with settings(host_string=cfgm_host,
                          password=get_env_passwords(cfgm_host)):
                tmp_fname = os.path.join('/tmp', os.path.basename(vnc_api_lib))
                get_as_sudo(vnc_api_lib, tmp_fname)
            put(tmp_fname, vnc_api_lib, use_sudo=True)
def copy_vnc_api_lib_ini_to_node(*nodes):
    vnc_api_lib = '/etc/contrail/vnc_api_lib.ini'
    cfgm_host = env.roledefs['cfgm'][0]
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            with settings(host_string=cfgm_host,
                          password=get_env_passwords(cfgm_host)):
                tmp_dir = tempfile.mkdtemp()
                tmp_fname = os.path.join(tmp_dir, os.path.basename(vnc_api_lib))
                get_as_sudo(vnc_api_lib, tmp_fname)
            put(tmp_fname, vnc_api_lib, use_sudo=True)
def setup_apiserver_ssl_certs_node(*nodes):
    default_certfile = '/etc/contrail/ssl/certs/contrail.pem'
    default_keyfile = '/etc/contrail/ssl/private/contrail.key'
    default_cafile = '/etc/contrail/ssl/certs/contrail_ca.pem'
    contrailcertbundle = get_apiserver_cert_bundle()
    ssl_certs = ((get_apiserver_certfile(), default_certfile),
                 (get_apiserver_keyfile(), default_keyfile),
                 (get_apiserver_cafile(), default_cafile))
    index = env.roledefs['cfgm'].index(env.host_string) + 1
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    # Clear old certificate
                    sudo('rm -f %s' % ssl_cert)
                    sudo('rm -f %s' % contrailcertbundle)
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    cfgm_host = env.roledefs['cfgm'][0]
                    if index == 1:
                        if not exists(ssl_cert, use_sudo=True):
                            print "Creating apiserver SSL certs in first cfgm node"
                            cfgm_ip = get_contrail_internal_vip() or \
                                hstr_to_ip(get_control_host_string(cfgm_host))
                            sudo('create-api-ssl-certs.sh %s' % cfgm_ip)
                    else:
                        with settings(host_string=cfgm_host,
                                      password=get_env_passwords(cfgm_host)):
                            while not exists(ssl_cert, use_sudo=True):
                                print "Wait for SSL certs to be created in first cfgm"
                                sleep(0.1)
                            print "Get SSL cert(%s) from first cfgm" % ssl_cert
                            tmp_dir = tempfile.mkdtemp()
                            tmp_fname = os.path.join(tmp_dir, os.path.basename(ssl_cert))
                            get_as_sudo(ssl_cert, tmp_fname)
                        print "Copy to this(%s) cfgm node" % env.host_string
                        sudo('mkdir -p /etc/contrail/ssl/certs/')
                        sudo('mkdir -p /etc/contrail/ssl/private/')
                        put(tmp_fname, ssl_cert, use_sudo=True)
                        os.remove(tmp_fname)
                elif os.path.isfile(ssl_cert):
                    print "Certificate (%s) exists locally" % ssl_cert
                    put(ssl_cert, default, use_sudo=True)
                elif exists(ssl_cert, use_sudo=True):
                    print "Certificate (%s) exists in cfgm node" % ssl_cert
                else:
                    raise RuntimeError("%s doesn't exist locally or in the cfgm node" % ssl_cert)
            if not exists(contrailcertbundle, use_sudo=True):
                ((certfile, _), (keyfile, _), (cafile, _)) = ssl_certs
                sudo('cat %s %s > %s' % (certfile, cafile, contrailcertbundle))
            sudo("chown -R contrail:contrail /etc/contrail/ssl")
def fix_cmon_param_and_add_keys_to_compute():
    cmon_param = '/etc/contrail/ha/cmon_param'
    compute_host_list = []
    for host_string in env.roledefs['compute']:
        with settings(host_string=host_string,
                      password=get_env_passwords(host_string)):
            host_name = sudo('hostname')
            compute_host_list.append(host_name)
    # Get AMQP host list
    amqp_in_role = 'cfgm'
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        amqp_in_role = 'openstack'
    amqp_host_list = []
    for host_string in env.roledefs[amqp_in_role]:
        with settings(host_string=host_string,
                      password=get_env_passwords(host_string)):
            host_name = sudo('hostname')
            amqp_host_list.append(host_name)
    computes = 'COMPUTES=("' + '" "'.join(compute_host_list) + '")'
    sudo("echo '%s' >> %s" % (computes, cmon_param))
    sudo("echo 'COMPUTES_SIZE=${#COMPUTES[@]}' >> %s" % cmon_param)
    sudo("echo 'COMPUTES_USER=root' >> %s" % cmon_param)
    sudo("echo 'PERIODIC_RMQ_CHK_INTER=60' >> %s" % cmon_param)
    sudo("echo 'RABBITMQ_RESET=True' >> %s" % cmon_param)
    amqps = 'DIPHOSTS=("' + '" "'.join(amqp_host_list) + '")'
    sudo("echo '%s' >> %s" % (amqps, cmon_param))
    sudo("echo 'DIPS_HOST_SIZE=${#DIPHOSTS[@]}' >> %s" % cmon_param)
    id_rsa_pubs = {}
    if files.exists('~/.ssh', use_sudo=True):
        sudo('chmod 700 ~/.ssh')
    if (not files.exists('~/.ssh/id_rsa', use_sudo=True) and
            not files.exists('~/.ssh/id_rsa.pub', use_sudo=True)):
        sudo('ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ""')
    elif (not files.exists('~/.ssh/id_rsa', use_sudo=True) or
          not files.exists('~/.ssh/id_rsa.pub', use_sudo=True)):
        sudo('rm -rf ~/.ssh/id_rsa*')
        sudo('ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ""')
    id_rsa_pubs.update({env.host_string: sudo('cat ~/.ssh/id_rsa.pub')})
    for host_string in env.roledefs['compute']:
        with settings(host_string=host_string):
            sudo("mkdir -p ~/.ssh/")
            for host, id_rsa_pub in id_rsa_pubs.items():
                files.append('~/.ssh/authorized_keys', id_rsa_pub, use_sudo=True)
            sudo('chmod 640 ~/.ssh/authorized_keys')
def copy_certs_for_neutron_node(*nodes):
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            sudo("mkdir -p /etc/neutron/ssl/certs/")
            sudo("cp /etc/contrail/ssl/certs/* /etc/neutron/ssl/certs/")
            sudo("chown -R neutron:neutron /etc/neutron/ssl")
            sudo("usermod -a -G contrail neutron")
def attach_logs_cores(bug_id, timestamp=None, duration=None, analytics_log='yes'):
    '''Attach the logs, core files, backtraces and contrail-version to a specified location.
    If the duration argument is specified, cassandra logs are collected for that
    duration (in minutes); otherwise cassandra logs are collected for the system uptime.
    '''
    build = env.roledefs['build'][0]
    if timestamp:
        folder = '%s/%s' % (bug_id, timestamp)
    else:
        time_str = dt.now().strftime("%Y_%m_%d_%H_%M_%S")
        folder = '%s/%s' % (bug_id, time_str)
    local('mkdir -p %s' % (folder))
    execute(tar_logs_cores)
    if analytics_log == 'yes':
        execute(get_cassandra_logs, duration)
    with hide('everything'):
        for host in env.roledefs['all']:
            with settings(host_string=host, password=get_env_passwords(host),
                          connection_attempts=3, timeout=20, warn_only=True):
                get('/var/log/logs_*.tgz', '%s/' % (folder))
                get('/var/crashes/*gz', '%s/' % (folder))
                get('/var/log/gdb_*.log', '%s/' % (folder))
                get('/var/log/contrail_version*.log', '%s/' % (folder))
                if analytics_log == 'yes':
                    get('/var/log/cassandra_log*.gz', '%s/' % (folder))
                print "\nAll logs and cores are saved in %s of %s" % (folder, env.host)
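# Example invocations (added for illustration; the bug id and duration values
# below are hypothetical, using the standard fab task-argument syntax):
#   fab attach_logs_cores:123456
#   fab attach_logs_cores:123456,duration=30,analytics_log=no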
def update_config_option(role, file_path, section, option, value, service, sku):
    """Task to update config option of any section in a conf file
       USAGE: fab update_config_option:openstack,/etc/keystone/keystone.conf,token,expiration,86400,keystone
    """
    cmd1 = "openstack-config --set " + file_path + " " + section + " " + option + " " + value
    for host in env.roledefs[role]:
        with settings(host_string=host, password=get_env_passwords(host)):
            ostype = detect_ostype()
            service_name = SERVICE_NAMES.get(service, {}).get(ostype, service)
            cmd2 = "service " + service_name + " restart"
            if service == 'keystone':
                if sku == 'newton':
                    cmd2 = '/etc/init.d/apache2 restart'
                elif sku == 'ocata':
                    cmd1 = ("sed -i 's/\[token\]/\[token\]\\nexpiration=86400/' "
                            "/etc/kolla/keystone/keystone.conf "
                            "< /etc/kolla/keystone/keystone.conf")
                    cmd2 = 'docker restart ' + service_name
                    cmd = "docker exec -it horizon "
                    sudo_usr_path = "sudo /usr/share/openstack-dashboard/manage.py "
                    cmd3 = (cmd + "sudo sed -i -e "
                            "'s:/usr/share/openstack-dashboard/static:"
                            "/var/lib/openstack-dashboard/static:g' "
                            "/etc/apache2/conf-enabled/000-default.conf ")
                    cmd4 = cmd + sudo_usr_path + " collectstatic --noinput"
                    cmd5 = cmd + sudo_usr_path + " compress"
                    cmd6 = cmd + "service apache2 reload"
                    sudo(cmd3)
                    sudo(cmd4)
                    sudo(cmd5)
                    sudo(cmd6)
            sudo(cmd1)
            sudo(cmd2)
def detach_vrouter_node(*args):
    """Detaches one/more compute node from the existing cluster."""
    cfgm_host = get_control_host_string(env.roledefs['cfgm'][0])
    cfgm_host_password = get_env_passwords(env.roledefs['cfgm'][0])
    cfgm_ip = hstr_to_ip(cfgm_host)
    nova_compute = "openstack-nova-compute"
    for host_string in args:
        with settings(host_string=host_string, warn_only=True):
            sudo("service supervisor-vrouter stop")
            if detect_ostype() in ['ubuntu']:
                nova_compute = "nova-compute"
            mode = get_mode(host_string)
            if (mode == 'vcenter'):
                nova_compute = ""
            if (nova_compute != ""):
                sudo("service %s stop" % nova_compute)
            compute_hostname = sudo("hostname")
        with settings(host_string=env.roledefs['cfgm'][0],
                      password=cfgm_host_password):
            sudo("python /opt/contrail/utils/provision_vrouter.py "
                 "--host_name %s --host_ip %s --api_server_ip %s --oper del %s" %
                 (compute_hostname, host_string.split('@')[1], cfgm_ip, get_mt_opts()))
    execute("restart_control")
def fix_cmon_param_and_add_keys_to_compute(): cmon_param = '/etc/contrail/ha/cmon_param' compute_host_list = [] for host_string in env.roledefs['compute']: with settings(host_string=host_string, password=get_env_passwords(host_string)): host_name = sudo('hostname') compute_host_list.append(host_name) # Get AMQP host list amqp_in_role = 'cfgm' if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes': amqp_in_role = 'openstack' amqp_host_list = [] for host_string in env.roledefs[amqp_in_role]: with settings(host_string=host_string, password=get_env_passwords(host_string)): host_name = sudo('hostname -s') amqp_host_list.append(host_name) computes = 'COMPUTES=("' + '" "'.join(compute_host_list) + '")' sudo("grep -q 'COMPUTES' %s || echo '%s' >> %s" % (cmon_param, computes, cmon_param)) sudo("grep -q 'COMPUTES_SIZE' %s || echo 'COMPUTES_SIZE=${#COMPUTES[@]}' >> %s" % (cmon_param, cmon_param)) sudo("grep -q 'COMPUTES_USER' %s || echo 'COMPUTES_USER=root' >> %s" % (cmon_param, cmon_param)) sudo("grep -q 'PERIODIC_RMQ_CHK_INTER' %s || echo 'PERIODIC_RMQ_CHK_INTER=60' >> %s" % (cmon_param, cmon_param)) sudo("grep -q 'RABBITMQ_RESET' %s || echo 'RABBITMQ_RESET=True' >> %s" % (cmon_param, cmon_param)) amqps = 'DIPHOSTS=("' + '" "'.join(amqp_host_list) + '")' sudo("grep -q 'DIPHOSTS' %s || echo '%s' >> %s" % (cmon_param, amqps, cmon_param)) sudo("grep -q 'DIPS_HOST_SIZE' %s || echo 'DIPS_HOST_SIZE=${#DIPHOSTS[@]}' >> %s" % (cmon_param, cmon_param)) sudo("sort %s | uniq > /tmp/cmon_param" % cmon_param) sudo("mv /tmp/cmon_param %s" % cmon_param) id_rsa_pubs = {} if files.exists('~/.ssh', use_sudo=True): sudo('chmod 700 ~/.ssh') if (not files.exists('~/.ssh/id_rsa', use_sudo=True) and not files.exists('~/.ssh/id_rsa.pub', use_sudo=True)): sudo('ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ""') elif (not files.exists('~/.ssh/id_rsa', use_sudo=True) or not files.exists('~/.ssh/id_rsa.pub', use_sudo=True)): sudo('rm -rf ~/.ssh/id_rsa*') sudo('ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ""') id_rsa_pubs.update({env.host_string : sudo('cat ~/.ssh/id_rsa.pub')}) for host_string in env.roledefs['compute']: with settings(host_string=host_string): sudo("mkdir -p ~/.ssh/") for host, id_rsa_pub in id_rsa_pubs.items(): files.append('~/.ssh/authorized_keys', id_rsa_pub, use_sudo=True) sudo('chmod 640 ~/.ssh/authorized_keys')
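# For reference (added): after the task above runs, /etc/contrail/ha/cmon_param
# is expected to contain lines of the following shape (the hostnames shown are
# examples only):
#   COMPUTES=("compute-1" "compute-2")
#   COMPUTES_SIZE=${#COMPUTES[@]}
#   COMPUTES_USER=root
#   PERIODIC_RMQ_CHK_INTER=60
#   RABBITMQ_RESET=True
#   DIPHOSTS=("cfgm-1" "cfgm-2")
#   DIPS_HOST_SIZE=${#DIPHOSTS[@]}
# The grep -q guards make the writes idempotent, and the final sort | uniq
# pass removes any duplicates left over from earlier runs.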
def setup_apiserver_ssl_certs_node(*nodes): default_certfile = '/etc/contrail/ssl/certs/contrail.pem' default_keyfile = '/etc/contrail/ssl/private/contrail.key' default_cafile = '/etc/contrail/ssl/certs/contrail_ca.pem' contrailcertbundle = get_apiserver_cert_bundle() ssl_certs = ((get_apiserver_certfile(), default_certfile), (get_apiserver_keyfile(), default_keyfile), (get_apiserver_cafile(), default_cafile)) index = env.roledefs['cfgm'].index(env.host_string) + 1 for node in nodes: with settings(host_string=node, password=get_env_passwords(node)): for ssl_cert, default in ssl_certs: if ssl_cert == default: # Clear old certificate sudo('rm -f %s' % ssl_cert) sudo('rm -f %s' % contrailcertbundle) for ssl_cert, default in ssl_certs: if ssl_cert == default: cfgm_host = env.roledefs['cfgm'][0] if index == 1: if not exists(ssl_cert, use_sudo=True): print "Creating apiserver SSL certs in first cfgm node" cfgm_ip = get_contrail_internal_vip() or hstr_to_ip(cfgm_host) sudo('create-api-ssl-certs.sh %s' % cfgm_ip) else: with settings(host_string=cfgm_host, password=get_env_passwords(cfgm_host)): while not exists(ssl_cert, use_sudo=True): print "Wait for SSL certs to be created in first cfgm" sleep(0.1) print "Get SSL cert(%s) from first cfgm" % ssl_cert tmp_fname = os.path.join('/tmp', os.path.basename(ssl_cert)) get_as_sudo(ssl_cert, tmp_fname) print "Copy to this(%s) cfgm node" % env.host_string put(tmp_fname, ssl_cert, use_sudo=True) os.remove(tmp_fname) elif os.path.isfile(ssl_cert): print "Certificate (%s) exists locally" % ssl_cert put(ssl_cert, default, use_sudo=True) elif exists(ssl_cert, use_sudo=True): print "Certificate (%s) exists in cfgm node" % ssl_cert else: raise RuntimeError("%s doesn't exists locally or in cfgm node" % ssl_cert) if not exists(contrailcertbundle, use_sudo=True): ((certfile, _), (keyfile, _), (cafile, _)) = ssl_certs sudo('cat %s %s > %s' % (certfile, cafile, contrailcertbundle)) sudo("chown -R contrail:contrail /etc/contrail/ssl")
def copy_keystone_ssl_key_to_node(*nodes):
    ssl_key = get_keystone_keyfile()
    openstack_host = env.roledefs['openstack'][0]
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            key_file = '/etc/contrail/ssl/private/%s' % os.path.basename(ssl_key)
            # Clear old key
            sudo('rm -f %s' % key_file)
            with settings(host_string=openstack_host,
                          password=get_env_passwords(openstack_host)):
                tmp_dir = tempfile.mkdtemp()
                tmp_fname = os.path.join(tmp_dir, os.path.basename(ssl_key))
                get_as_sudo(ssl_key, tmp_fname)
            sudo("mkdir -p /etc/contrail/ssl/private/")
            put(tmp_fname, key_file, use_sudo=True)
            os.remove(tmp_fname)
            sudo("chown -R contrail:contrail /etc/contrail/ssl/private")
def copy_keystone_ssl_certs_to_node(*nodes):
    ssl_certs = (get_keystone_certfile(), get_keystone_cafile())
    openstack_host = env.roledefs['openstack'][0]
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert in ssl_certs:
                cert_file = '/etc/contrail/ssl/certs/%s' % os.path.basename(ssl_cert)
                # Clear old certificate
                sudo('rm -f %s' % cert_file)
                with settings(host_string=openstack_host,
                              password=get_env_passwords(openstack_host)):
                    tmp_fname = os.path.join('/tmp', os.path.basename(ssl_cert))
                    get_as_sudo(ssl_cert, tmp_fname)
                sudo("mkdir -p /etc/contrail/ssl/certs/")
                put(tmp_fname, cert_file, use_sudo=True)
                os.remove(tmp_fname)
            sudo("chown -R contrail:contrail /etc/contrail/ssl")
def fix_wsrep_cluster_address():
    openstack_host_list = [get_control_host_string(openstack_host)
                           for openstack_host in env.roledefs['openstack']]
    galera_ip_list = [hstr_to_ip(galera_host)
                      for galera_host in openstack_host_list]
    with settings(host_string=env.roledefs['openstack'][0],
                  password=get_env_passwords(env.roledefs['openstack'][0])):
        wsrep_conf = '/etc/mysql/my.cnf'
        if detect_ostype() in ['ubuntu']:
            wsrep_conf = '/etc/mysql/conf.d/wsrep.cnf'
        sudo('sed -ibak "s#wsrep_cluster_address=.*#wsrep_cluster_address=gcomm://%s:4567#g" %s'
             % (':4567,'.join(galera_ip_list), wsrep_conf))
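# For reference (added): with galera_ip_list == ['10.1.1.1', '10.1.1.2',
# '10.1.1.3'] (example addresses), the sed above rewrites the config line to:
#   wsrep_cluster_address=gcomm://10.1.1.1:4567,10.1.1.2:4567,10.1.1.3:4567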
def fix_cmon_param_and_add_keys_to_compute(): cmon_param = "/etc/contrail/ha/cmon_param" compute_host_list = [] for host_string in env.roledefs["compute"]: with settings(host_string=host_string, password=get_env_passwords(host_string)): host_name = sudo("hostname") compute_host_list.append(host_name) # Get AMQP host list amqp_in_role = "cfgm" if get_from_testbed_dict("openstack", "manage_amqp", "no") == "yes": amqp_in_role = "openstack" amqp_host_list = [] for host_string in env.roledefs[amqp_in_role]: with settings(host_string=host_string, password=get_env_passwords(host_string)): host_name = sudo("hostname") amqp_host_list.append(host_name) computes = 'COMPUTES=("' + '" "'.join(compute_host_list) + '")' sudo("echo '%s' >> %s" % (computes, cmon_param)) sudo("echo 'COMPUTES_SIZE=${#COMPUTES[@]}' >> %s" % cmon_param) sudo("echo 'COMPUTES_USER=root' >> %s" % cmon_param) sudo("echo 'PERIODIC_RMQ_CHK_INTER=60' >> %s" % cmon_param) sudo("echo 'RABBITMQ_RESET=True' >> %s" % cmon_param) amqps = 'DIPHOSTS=("' + '" "'.join(amqp_host_list) + '")' sudo("echo '%s' >> %s" % (amqps, cmon_param)) sudo("echo 'DIPS_HOST_SIZE=${#DIPHOSTS[@]}' >> %s" % cmon_param) sudo("sort %s | uniq > /tmp/cmon_param" % cmon_param) sudo("mv /tmp/cmon_param %s" % cmon_param) id_rsa_pubs = {} if files.exists("~/.ssh", use_sudo=True): sudo("chmod 700 ~/.ssh") if not files.exists("~/.ssh/id_rsa", use_sudo=True) and not files.exists("~/.ssh/id_rsa.pub", use_sudo=True): sudo('ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ""') elif not files.exists("~/.ssh/id_rsa", use_sudo=True) or not files.exists("~/.ssh/id_rsa.pub", use_sudo=True): sudo("rm -rf ~/.ssh/id_rsa*") sudo('ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ""') id_rsa_pubs.update({env.host_string: sudo("cat ~/.ssh/id_rsa.pub")}) for host_string in env.roledefs["compute"]: with settings(host_string=host_string): sudo("mkdir -p ~/.ssh/") for host, id_rsa_pub in id_rsa_pubs.items(): files.append("~/.ssh/authorized_keys", id_rsa_pub, use_sudo=True) sudo("chmod 640 ~/.ssh/authorized_keys")
def setup_keystone_ssl_certs_node(*nodes):
    default_certfile = '/etc/keystone/ssl/certs/keystone.pem'
    default_keyfile = '/etc/keystone/ssl/private/keystone.key'
    default_cafile = '/etc/keystone/ssl/certs/keystone_ca.pem'
    ssl_certs = ((get_keystone_certfile(), default_certfile),
                 (get_keystone_keyfile(), default_keyfile),
                 (get_keystone_cafile(), default_cafile))
    index = env.roledefs['openstack'].index(env.host_string) + 1
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    # Clear old certificate
                    sudo('rm -f %s' % ssl_cert)
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    openstack_host = env.roledefs['openstack'][0]
                    if index == 1:
                        if not exists(ssl_cert, use_sudo=True):
                            print "Creating keystone SSL certs in first openstack node"
                            sudo('create-keystone-ssl-certs.sh %s' % (
                                get_openstack_internal_vip() or hstr_to_ip(openstack_host)))
                    else:
                        with settings(host_string=openstack_host,
                                      password=get_env_passwords(openstack_host)):
                            while not exists(ssl_cert, use_sudo=True):
                                print "Wait for SSL certs to be created in first openstack"
                                sleep(0.1)
                            print "Get SSL cert(%s) from first openstack" % ssl_cert
                            tmp_fname = os.path.join('/tmp', os.path.basename(ssl_cert))
                            get_as_sudo(ssl_cert, tmp_fname)
                        print "Copy to this(%s) openstack node" % env.host_string
                        put(tmp_fname, ssl_cert, use_sudo=True)
                        os.remove(tmp_fname)
                elif os.path.isfile(ssl_cert):
                    print "Certificate (%s) exists locally" % ssl_cert
                    put(ssl_cert, default, use_sudo=True)
                elif exists(ssl_cert, use_sudo=True):
                    print "Certificate (%s) exists in openstack node" % ssl_cert
                else:
                    raise RuntimeError("%s doesn't exist locally or in the openstack node"
                                       % ssl_cert)
            sudo("chown -R keystone:keystone /etc/keystone/ssl")
def copy_keystone_ssl_certs_to_node(*nodes):
    ssl_certs = (get_keystone_certfile(), get_keystone_cafile())
    openstack_host = env.roledefs['openstack'][0]
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert in ssl_certs:
                cert_file = '/etc/contrail/ssl/certs/%s' % os.path.basename(ssl_cert)
                # Clear old certificate
                sudo('rm -f %s' % cert_file)
                with settings(host_string=openstack_host,
                              password=get_env_passwords(openstack_host)):
                    tmp_dir = tempfile.mkdtemp()
                    tmp_fname = os.path.join(tmp_dir, os.path.basename(ssl_cert))
                    get_as_sudo(ssl_cert, tmp_fname)
                sudo("mkdir -p /etc/contrail/ssl/certs/")
                put(tmp_fname, cert_file, use_sudo=True)
                os.remove(tmp_fname)
            sudo("chown -R contrail:contrail /etc/contrail/ssl")
def update_config_option(role, file_path, section, option, value, service):
    """Task to update config option of any section in a conf file
       USAGE: fab update_config_option:openstack,/etc/keystone/keystone.conf,token,expiration,86400,keystone
    """
    cmd1 = "openstack-config --set " + file_path + " " + section + " " + option + " " + value
    cmd2 = "service " + service + " restart"
    for host in env.roledefs[role]:
        with settings(host_string=host, password=get_env_passwords(host)):
            sudo(cmd1)
            sudo(cmd2)
def copy_certs_for_heat_node(*nodes):
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            if node in env.roledefs['cfgm']:
                sudo("usermod -a -G contrail heat")
            else:
                execute('copy_apiserver_ssl_certs_to_node', node)
                execute('copy_vnc_api_lib_ini_to_node', node)
                sudo("chown -R heat:heat /etc/contrail")
            for svc in ['heat-api', 'heat-engine', 'heat-api-cfn']:
                sudo("service %s restart" % svc)
def check_reimage_status():
    user, hostip = env.host_string.split('@')
    print 'Reimage issued; Waiting for the node (%s) to go down...' % hostip
    common.wait_until_host_down(wait=300, host=hostip)
    print 'Node (%s) is down... Waiting for node to come back up' % hostip
    sys.stdout.write('.')
    while not verify_sshd(hostip, user, get_env_passwords(env.host_string)):
        sys.stdout.write('.')
        sys.stdout.flush()
        sleep(2)
        continue
def copy_certs_for_heat_node(*nodes):
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            if node in env.roledefs['cfgm']:
                sudo("usermod -a -G contrail heat")
            else:
                execute('copy_apiserver_ssl_certs_to_node', node)
                execute('copy_vnc_api_lib_ini_to_node', node)
                sudo("chown -R heat:heat /etc/contrail")
            for svc_key in ['heat-api', 'heat-engine', 'heat-api-cfn']:
                sudo("service %s restart" % get_openstack_services()[svc_key])
def update_js_config(role, file_path, service):
    """Task to update config of any section in a js file
       USAGE: fab update_js_config:openstack,/etc/contrail/config.global.js,contrail-webui
    """
    with open(file_path, 'a') as fo:
        fo.write('config.session = {};\n')
        fo.write('config.session.timeout = 24 * 60 * 60 * 1000;\n')
        fo.close()
    cmd = "service " + service + " restart"
    for host in env.roledefs[role]:
        with settings(host_string=host, password=get_env_passwords(host)):
            sudo(cmd)
def update_js_config(role, file_path, service):
    """Task to update config of any section in a js file
       USAGE: fab update_js_config:openstack,/etc/contrail/config.global.js,contrail-webui
    """
    cmd1 = "echo 'config.session = {};' >> " + file_path
    cmd2 = "echo 'config.session.timeout = 86400 * 1000;' >> " + file_path
    cmd3 = "service " + service + " restart"
    for host in env.roledefs[role]:
        with settings(host_string=host, password=get_env_passwords(host)):
            sudo(cmd1)
            sudo(cmd2)
            sudo(cmd3)
def get_compute_host_intf(compute):
    compute_ip = hstr_to_ip(compute)
    with settings(host_string=compute, password=get_env_passwords(compute)):
        get_name = "ifconfig -a | grep -B1 %s | cut -d' ' -f1" % compute_ip
        host_intf = sudo(get_name).strip()
        if host_intf == 'br0':
            get_hw_addr = "ifconfig br0 | grep 'HWaddr' | awk '{print $5}'"
            hw_addr = sudo(get_hw_addr).strip()
            get_name = "ifconfig -a | grep '%s' | awk '{print $1}'" % hw_addr
            host_intf_list = sudo(get_name).strip().split('\n')
            host_intf_list = map(str.strip, host_intf_list)
            host_intf_list.remove('br0')
            host_intf = host_intf_list[0]
    return host_intf
def issu_openstack_snapshot_db(from_version, to_version, db_file):
    if from_version == 'kilo' and to_version == 'liberty':
        sudo("nova-manage db migrate_flavor_data")
    sql_passwd = sudo('cat /etc/contrail/mysql.token')
    sudo("mysqldump -u root --password=%s --opt --add-drop-database --all-databases > %s"
         % (sql_passwd, db_file))
    svc_token = '/etc/contrail/service.token'
    mysql_token = '/etc/contrail/mysql.token'
    get_as_sudo('~/%s' % db_file, '/tmp')
    get_as_sudo(svc_token, '/tmp')
    get_as_sudo(mysql_token, '/tmp')
    with settings(host_string=env.roledefs['openstack'][0],
                  password=get_env_passwords(env.roledefs['openstack'][0])):
        put('/tmp/%s' % (db_file), '~/', use_sudo=True)
        put('/tmp/service.token', svc_token, use_sudo=True)
        put('/tmp/mysql.token', mysql_token, use_sudo=True)
def rabbitmq_env():
    erl_node_name = None
    rabbit_env_conf = "/etc/rabbitmq/rabbitmq-env.conf"
    with settings(host_string=env.host_string,
                  password=get_env_passwords(env.host_string)):
        host_name = sudo("hostname -s") + ctrl
        erl_node_name = "rabbit@%s" % (host_name)
        rabbitmq_env_template = rabbitmq_env_conf
        rmq_env_conf = rabbitmq_env_template.template.safe_substitute(
            {"__erl_node_ip__": hstr_to_ip(get_control_host_string(env.host_string)),
             "__erl_node_name__": erl_node_name})
        tmp_fname = "/tmp/rabbitmq-env-%s.conf" % env.host_string
        cfg_file = open(tmp_fname, "w")
        cfg_file.write(rmq_env_conf)
        cfg_file.close()
        put(tmp_fname, rabbit_env_conf, use_sudo=True)
        local("rm %s" % (tmp_fname))
def update_js_config(role, file_path, service, container=None):
    """Task to update config of any section in a js file
       USAGE: fab update_js_config:openstack,/etc/contrail/config.global.js,contrail-webui
    """
    if container:
        cmd = "docker exec -it controller bash "
        cmd1 = cmd + "-c \"echo config.session = \{\}\; >> " + file_path + "\""
        cmd2 = cmd + "-c \"echo config.session.timeout = 86400 \* 1000\; >> " + file_path + "\""
        cmd3 = cmd + "service " + service + " restart"
    else:
        cmd1 = "echo 'config.session = {};' >> " + file_path
        cmd2 = "echo 'config.session.timeout = 86400 * 1000;' >> " + file_path
        cmd3 = "service " + service + " restart"
    for host in env.roledefs[role]:
        with settings(host_string=host, password=get_env_passwords(host)):
            sudo(cmd1)
            sudo(cmd2)
            sudo(cmd3)
def detach_vrouter_node(*args):
    """Detaches one/more compute node from the existing cluster."""
    cfgm_host = get_control_host_string(env.roledefs['cfgm'][0])
    cfgm_host_password = get_env_passwords(env.roledefs['cfgm'][0])
    cfgm_ip = hstr_to_ip(cfgm_host)
    nova_compute = "openstack-nova-compute"
    if detect_ostype() in ['ubuntu']:
        nova_compute = "nova-compute"
    for host_string in args:
        compute_hostname = socket.gethostbyaddr(hstr_to_ip(host_string))[0].split('.')[0]
        with settings(host_string=host_string, warn_only=True):
            sudo("service supervisor-vrouter stop")
            sudo("service %s stop" % nova_compute)
        with settings(host_string=cfgm_host, password=cfgm_host_password):
            sudo("python /opt/contrail/utils/provision_vrouter.py "
                 "--host_name %s --host_ip %s --api_server_ip %s --oper del %s" %
                 (compute_hostname, host_string.split('@')[1], cfgm_ip, get_mt_opts()))
    execute("restart_control")
def config_rabbitmq():
    rabbit_hosts = []
    rabbit_conf = "/etc/rabbitmq/rabbitmq.config"
    if len(env.roledefs["rabbit"]) <= 1 and detect_ostype() == "redhat":
        print "CONFIG_RABBITMQ: Skip creating rabbitmq.config for Single node setup"
        return
    for host_string in env.roledefs["rabbit"]:
        with settings(host_string=host_string,
                      password=get_env_passwords(host_string)):
            host_name = sudo("hostname -s") + ctrl
            rabbit_hosts.append("'rabbit@%s'" % host_name)
    rabbit_hosts = ", ".join(rabbit_hosts)
    rabbitmq_config_template = rabbitmq_config
    if len(env.roledefs["rabbit"]) == 1:
        rabbitmq_config_template = rabbitmq_config_single_node
    rabbitmq_configs = rabbitmq_config_template.template.safe_substitute(
        {"__control_intf_ip__": hstr_to_ip(get_control_host_string(env.host_string)),
         "__rabbit_hosts__": rabbit_hosts})
    tmp_fname = "/tmp/rabbitmq_%s.config" % env.host_string
    cfg_file = open(tmp_fname, "w")
    cfg_file.write(rabbitmq_configs)
    cfg_file.close()
    put(tmp_fname, "/etc/rabbitmq/rabbitmq.config", use_sudo=True)
    local("rm %s" % (tmp_fname))
def use_keystone_ssl_certs_in_node(*nodes):
    for node in nodes:
        execute('copy_keystone_ssl_certs_to_node', node)
        execute('copy_keystone_ssl_key_to_node', node)
        with settings(host_string=node, password=get_env_passwords(node)):
            cert_path = '/etc/contrail/ssl/certs/'
            ssl_certs = (get_keystone_certfile(), get_keystone_cafile())
            for ssl_cert in ssl_certs:
                src = os.path.join(cert_path, os.path.basename(ssl_cert))
                dst = os.path.join(cert_path,
                                   os.path.basename(ssl_cert).replace('keystone', 'contrail'))
                sudo("cp %s %s" % (src, dst))
            key_path = '/etc/contrail/ssl/private/'
            ssl_key = get_keystone_keyfile()
            src_key = os.path.join(key_path, os.path.basename(ssl_key))
            dst_key = os.path.join(key_path,
                                   os.path.basename(ssl_key).replace('keystone', 'contrail'))
            sudo("cp %s %s" % (src_key, dst_key))
            certfile = '/etc/contrail/ssl/certs/contrail.pem'
            cafile = '/etc/contrail/ssl/certs/contrail_ca.pem'
            contrailcertbundle = get_apiserver_cert_bundle()
            sudo('cat %s %s > %s' % (certfile, cafile, contrailcertbundle))
            sudo("chown -R contrail:contrail /etc/contrail/ssl")
def configure_test_env(contrail_fab_path='/opt/contrail/utils', test_dir='/contrail-test'): """ Configure test environment by creating sanity_params.ini and sanity_testbed.json files """ sys.path.insert(0, contrail_fab_path) from fabfile.testbeds import testbed from fabfile.utils.host import get_openstack_internal_vip,\ get_control_host_string, get_authserver_ip, get_admin_tenant_name, \ get_authserver_port, get_env_passwords, get_authserver_credentials, \ get_vcenter_ip, get_vcenter_port, get_vcenter_username, \ get_vcenter_password, get_vcenter_datacenter, get_vcenter_compute from fabfile.utils.multitenancy import get_mt_enable from fabfile.utils.interface import get_data_ip cfgm_host = env.roledefs['cfgm'][0] with settings(warn_only=True): with lcd(contrail_fab_path): if local('git branch').succeeded: fab_revision = local('git log --format="%H" -n 1', capture=True) else: with settings(host_string=cfgm_host): fab_revision = run('cat /opt/contrail/contrail_packages/VERSION') with lcd(test_dir): if local('git branch').succeeded: revision = local('git log --format="%H" -n 1', capture=True) else: with settings(host_string=cfgm_host): revision = run('cat /opt/contrail/contrail_packages/VERSION') sanity_testbed_dict = { 'hosts': [], 'vgw': [], 'esxi_vms':[], 'hosts_ipmi': [], 'tor':[], } sample_ini_file = test_dir + '/' + 'sanity_params.ini.sample' with open(sample_ini_file, 'r') as fd_sample_ini: contents_sample_ini = fd_sample_ini.read() sanity_ini_templ = string.Template(contents_sample_ini) with settings(host_string = env.roledefs['openstack'][0]): openstack_host_name = run("hostname") with settings(host_string = env.roledefs['cfgm'][0]): cfgm_host_name = run("hostname") control_host_names = [] for control_host in env.roledefs['control']: with settings(host_string = control_host): host_name = run("hostname") control_host_names.append(host_name) cassandra_host_names = [] if 'database' in env.roledefs.keys(): for cassandra_host in env.roledefs['database']: with settings(host_string = cassandra_host): host_name = run("hostname") cassandra_host_names.append(host_name) internal_vip = get_openstack_internal_vip() for host_string in env.roledefs['all']: if host_string in env.roledefs.get('test',[]): continue host_ip = host_string.split('@')[1] with settings(host_string = host_string): host_name = run("hostname") host_dict = {} host_dict['ip'] = host_ip host_dict['data-ip']= get_data_ip(host_string)[0] if host_dict['data-ip'] == host_string.split('@')[1]: host_dict['data-ip'] = get_data_ip(host_string)[0] host_dict['control-ip']= get_control_host_string(host_string).split('@')[1] host_dict['name'] = host_name host_dict['username'] = host_string.split('@')[0] host_dict['password'] =get_env_passwords(host_string) host_dict['roles'] = [] if not internal_vip: if host_string in env.roledefs['openstack']: role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}} host_dict['roles'].append(role_dict) if host_string in env.roledefs['cfgm']: role_dict = {'type': 'cfgm', 'params': {'collector': host_name, 'cassandra': ' '.join(cassandra_host_names)}} if internal_vip: role_dict['openstack'] = 'contrail-vip' else: role_dict['openstack'] = openstack_host_name host_dict['roles'].append(role_dict) if host_string in env.roledefs['control']: role_dict = {'type': 'bgp', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}} host_dict['roles'].append(role_dict) if 'database' in env.roledefs.keys() and host_string in env.roledefs['database']: role_dict = { 'type': 'database', 'params': 
{'cassandra': ' '.join(cassandra_host_names)} } host_dict['roles'].append(role_dict) if host_string in env.roledefs['compute']: role_dict = {'type': 'compute', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}} role_dict['params']['bgp'] = [] if len(env.roledefs['control']) == 1: role_dict['params']['bgp'] = control_host_names else: for control_node in control_host_names: role_dict['params']['bgp'].append(control_node) # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))]) host_dict['roles'].append(role_dict) if 'collector' in env.roledefs.keys() and host_string in env.roledefs['collector']: role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} } host_dict['roles'].append(role_dict) if 'webui' in env.roledefs.keys() and host_string in env.roledefs['webui']: role_dict = { 'type': 'webui', 'params': {'cfgm': cfgm_host_name} } host_dict['roles'].append(role_dict) sanity_testbed_dict['hosts'].append(host_dict) if env.has_key('vgw'): sanity_testbed_dict['vgw'].append(env.vgw) # Read ToR config sanity_tor_dict = {} if env.has_key('tor_agent'): sanity_testbed_dict['tor_agent'] = env.tor_agent # Read any tor-host config if env.has_key('tor_hosts'): sanity_testbed_dict['tor_hosts'] = env.tor_hosts # Read any MX config (as physical_router ) if env.has_key('physical_routers'): sanity_testbed_dict['physical_routers'] = env.physical_routers esxi_hosts = getattr(testbed, 'esxi_hosts', None) if esxi_hosts: for esxi in esxi_hosts: host_dict = {} host_dict['ip'] = esxi_hosts[esxi]['ip'] host_dict['data-ip'] = host_dict['ip'] host_dict['control-ip'] = host_dict['ip'] host_dict['name'] = esxi host_dict['username'] = esxi_hosts[esxi]['username'] host_dict['password'] = esxi_hosts[esxi]['password'] host_dict['contrail_vm'] = esxi_hosts[esxi]['contrail_vm']['host'] host_dict['roles'] = [] sanity_testbed_dict['hosts'].append(host_dict) sanity_testbed_dict['esxi_vms'].append(host_dict) # Adding vip VIP dict for HA test setup with settings(host_string = env.roledefs['openstack'][0]): if internal_vip: host_dict = {} host_dict['data-ip']= get_authserver_ip() host_dict['control-ip']= get_authserver_ip() host_dict['ip']= get_authserver_ip() host_dict['name'] = 'contrail-vip' with settings(host_string = env.roledefs['cfgm'][0]): host_dict['username'] = host_string.split('@')[0] host_dict['password'] = get_env_passwords(host_string) host_dict['roles'] = [] role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}} host_dict['roles'].append(role_dict) sanity_testbed_dict['hosts'].append(host_dict) # get host ipmi list if env.has_key('hosts_ipmi'): sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi) # generate json file and copy to cfgm sanity_testbed_json = json.dumps(sanity_testbed_dict) stop_on_fail = env.get('stop_on_fail', False) mail_to = env.get('mail_to', '') log_scenario = env.get('log_scenario', 'Sanity') stack_user, stack_password = get_authserver_credentials() stack_tenant = get_admin_tenant_name() # Few hardcoded variables for sanity environment # can be removed once we move to python3 and configparser stack_domain = 'default-domain' webserver_host = '10.204.216.50' webserver_user = '******' webserver_password = '******' webserver_log_path = '/home/bhushana/Documents/technical/logs/' webserver_report_path = '/home/bhushana/Documents/technical/sanity' webroot = 'Docs/logs' mail_server = '10.204.216.49' mail_port = '25' fip_pool_name = 'floating-ip-pool' public_virtual_network='public' 
public_tenant_name='admin' fixture_cleanup = 'yes' generate_html_report = 'True' key = 'key1' mailSender = '*****@*****.**' use_devicemanager_for_md5 = getattr(testbed, 'use_devicemanager_for_md5', False) orch = getattr(env, 'orchestrator', 'openstack') router_asn = getattr(testbed, 'router_asn', '') public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '') public_vn_subnet = getattr(testbed, 'public_vn_subnet', '') ext_routers = getattr(testbed, 'ext_routers', '') router_info = str(ext_routers) test_verify_on_setup = getattr(env, 'test_verify_on_setup', True) webui = getattr(testbed, 'webui', False) horizon = getattr(testbed, 'horizon', False) ui_config = getattr(testbed, 'ui_config', False) ui_browser = getattr(testbed, 'ui_browser', False) if 'mail_server' in env.keys(): mail_server = env.mail_server mail_port = env.mail_port vcenter_dc = '' if orch == 'vcenter': public_tenant_name='vCenter' if env.has_key('vcenter'): if env.vcenter: vcenter_dc = env.vcenter['datacenter'] sanity_params = sanity_ini_templ.safe_substitute( {'__testbed_json_file__' : 'sanity_testbed.json', '__nova_keypair_name__' : key, '__orch__' : orch, '__stack_user__' : stack_user, '__stack_password__' : stack_password, '__auth_ip__' : get_authserver_ip(), '__auth_port__' : get_authserver_port(), '__stack_tenant__' : stack_tenant, '__stack_domain__' : stack_domain, '__multi_tenancy__' : get_mt_enable(), '__address_family__' : get_address_family(), '__log_scenario__' : log_scenario, '__generate_html_report__': generate_html_report, '__fixture_cleanup__' : fixture_cleanup, '__webserver__' : webserver_host, '__webserver_user__' : webserver_user, '__webserver_password__' : webserver_password, '__webserver_log_dir__' : webserver_log_path, '__webserver_report_dir__': webserver_report_path, '__webroot__' : webroot, '__mail_server__' : mail_server, '__mail_port__' : mail_port, '__sender_mail_id__' : mailSender, '__receiver_mail_id__' : mail_to, '__http_proxy__' : env.get('http_proxy', ''), '__ui_browser__' : ui_browser, '__ui_config__' : ui_config, '__horizon__' : horizon, '__webui__' : webui, '__devstack__' : False, '__public_vn_rtgt__' : public_vn_rtgt, '__router_asn__' : router_asn, '__router_name_ip_tuples__': router_info, '__public_vn_name__' : fip_pool_name, '__public_virtual_network__':public_virtual_network, '__public_tenant_name__' :public_tenant_name, '__public_vn_subnet__' : public_vn_subnet, '__test_revision__' : revision, '__fab_revision__' : fab_revision, '__test_verify_on_setup__': test_verify_on_setup, '__stop_on_fail__' : stop_on_fail, '__ha_setup__' : getattr(testbed, 'ha_setup', ''), '__ipmi_username__' : getattr(testbed, 'ipmi_username', ''), '__ipmi_password__' : getattr(testbed, 'ipmi_password', ''), '__vcenter_dc__' : vcenter_dc, '__vcenter_server__' : get_vcenter_ip(), '__vcenter_port__' : get_vcenter_port(), '__vcenter_username__' : get_vcenter_username(), '__vcenter_password__' : get_vcenter_password(), '__vcenter_datacenter__' : get_vcenter_datacenter(), '__vcenter_compute__' : get_vcenter_compute(), '__use_devicemanager_for_md5__' : use_devicemanager_for_md5, }) ini_file = test_dir + '/' + 'sanity_params.ini' testbed_json_file = test_dir + '/' + 'sanity_testbed.json' with open(ini_file, 'w') as ini: ini.write(sanity_params) with open(testbed_json_file,'w') as tb: tb.write(sanity_testbed_json)
def zookeeper_rolling_restart(): zoo_cfg = "/etc/zookeeper/conf/zoo.cfg" cfgm_nodes = copy.deepcopy(env.roledefs['cfgm']) database_nodes = copy.deepcopy(env.roledefs['database']) zookeeper_status = verfiy_zookeeper(*database_nodes) if (len(database_nodes) % 2) != 1: print "Recommended to run odd number of zookeeper(database) nodes." print "Add a new node to the existing clusters testbed,py and install contrail-install-packages in it.\n\ Installing/Provisioning will be done as part of Upgrade" exit(0) if cfgm_nodes == database_nodes: print "No need for rolling restart." if (len(database_nodes) > 1 and 'leader' in zookeeper_status.values() and 'follower' in zookeeper_status.values() and 'notrunning' not in zookeeper_status.values() and 'notinstalled' not in zookeeper_status.values() and 'standalone' not in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is already formed properly." return elif (len(database_nodes) == 1 and 'notinstalled' not in zookeeper_status.values() and 'standalone' in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is already formed properly." return execute('stop_cfgm') execute('backup_zookeeper_database') old_nodes = list(set(cfgm_nodes).difference(set(database_nodes))) new_nodes = list(set(database_nodes).difference(set(cfgm_nodes))) for new_node in new_nodes: zk_index = (database_nodes.index(new_node) + len(cfgm_nodes) + 1) with settings(host_string=new_node, password=get_env_passwords(new_node)): pdist = detect_ostype() print "Install zookeeper in the new node." execute('create_install_repo_node', new_node) remove_package(['supervisor'], pdist) upgrade_package(['python-contrail', 'contrail-openstack-database', 'zookeeper'], pdist) if pdist in ['ubuntu']: sudo("ln -sf /bin/true /sbin/chkconfig") sudo("chkconfig zookeeper on") print "Fix zookeeper configs" sudo("sudo sed 's/^#log4j.appender.ROLLINGFILE.MaxBackupIndex=/log4j.appender.ROLLINGFILE.MaxBackupIndex=/g' /etc/zookeeper/conf/log4j.properties > log4j.properties.new") sudo("sudo mv log4j.properties.new /etc/zookeeper/conf/log4j.properties") if pdist in ['centos']: sudo('echo export ZOO_LOG4J_PROP="INFO,CONSOLE,ROLLINGFILE" >> /usr/lib/zookeeper/bin/zkEnv.sh') if pdist in ['ubuntu']: sudo('echo ZOO_LOG4J_PROP="INFO,CONSOLE,ROLLINGFILE" >> /etc/zookeeper/conf/environment') print "put cluster-unique zookeeper's instance id in myid" sudo('sudo echo "%s" > /var/lib/zookeeper/myid' % (zk_index)) print "Add new nodes to existing zookeeper quorum" with settings(host_string=cfgm_nodes[0], password=get_env_passwords(cfgm_nodes[0])): for new_node in new_nodes: zk_index = (database_nodes.index(new_node) + len(cfgm_nodes) + 1) sudo('echo "server.%d=%s:2888:3888" >> %s' % (zk_index, hstr_to_ip(new_node), zoo_cfg)) tmp_dir= tempfile.mkdtemp() get_as_sudo(zoo_cfg, tmp_dir) print "Restart zookeeper in all nodes to make new nodes join zookeeper quorum" for zookeeper_node in cfgm_nodes + new_nodes: with settings(host_string=zookeeper_node, password=get_env_passwords(zookeeper_node)): put(tmp_dir+'/zoo.cfg', zoo_cfg, use_sudo=True) print "Start Zookeeper in new database node" execute('restart_zookeeper') print "Waiting 5 seconds for the new nodes in the zookeeper quorum to be synced." 
sleep(5) print "Shutdown old nodes one by one and also make sure leader/follower election is complete after each shut downs" zoo_nodes = cfgm_nodes + database_nodes for old_node in old_nodes: zoo_nodes.remove(old_node) with settings(host_string=old_node, password=get_env_passwords(old_node)): print "Stop Zookeeper in old cfgm node" execute('stop_zookeeper') for zoo_node in zoo_nodes: with settings(host_string=zoo_node, password=get_env_passwords(zoo_node)): sudo("sed -i '/^server.*%s:2888:3888/d' %s" % (hstr_to_ip(zoo_node), zoo_cfg)) retries = 3 while retries: zookeeper_status = verfiy_zookeeper(*zoo_nodes) if (len(zoo_nodes) > 1 and 'leader' in zookeeper_status.values() and 'follower' in zookeeper_status.values() and 'notrunning' not in zookeeper_status.values() and 'notinstalled' not in zookeeper_status.values() and 'standalone' not in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is formed properly." break elif (len(zoo_nodes) == 1 and 'notinstalled' not in zookeeper_status.values() and 'standalone' in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is formed properly." break else: retries -= 1 if retries: for zoo_node in zoo_nodes: with settings(host_string=zoo_node, password=get_env_passwords(zoo_node)): execute('restart_zookeeper') continue print "Zookeeper quorum is not formed. Fix it and retry upgrade" print zookeeper_status exit(1) print "Correct the server id in zoo.cfg for the new nodes in the zookeeper quorum" with settings(host_string=database_nodes[0], password=get_env_passwords(database_nodes[0])): sudo("sed -i '/^server.*3888/d' %s" % zoo_cfg) for zookeeper_node in database_nodes: zk_index = (database_nodes.index(zookeeper_node) + 1) sudo('echo "server.%d=%s:2888:3888" >> %s' % (zk_index, hstr_to_ip(zookeeper_node), zoo_cfg)) tmp_dir= tempfile.mkdtemp() get_as_sudo(zoo_cfg, tmp_dir) print "Correct the myid in myid file for the new nodes in the zookeeper quorum" for zookeeper_node in database_nodes: zk_index = (database_nodes.index(zookeeper_node) + 1) with settings(host_string=zookeeper_node, password=get_env_passwords(zookeeper_node)): print "put cluster-unique zookeeper's instance id in myid" sudo('sudo echo "%s" > /var/lib/zookeeper/myid' % (zk_index)) execute('stop_zookeeper') print "Restart all the zookeeper nodes in the new quorum" for zookeeper_node in database_nodes: with settings(host_string=zookeeper_node, password=get_env_passwords(zookeeper_node)): put(tmp_dir+'/zoo.cfg', zoo_cfg, use_sudo=True) execute('restart_zookeeper') print "Make sure leader/folower election is complete" with settings(host_string=zookeeper_node, password=get_env_passwords(zookeeper_node)): retries = 3 while retries: zookeeper_status = verfiy_zookeeper(*database_nodes) if (len(database_nodes) > 1 and 'leader' in zookeeper_status.values() and 'follower' in zookeeper_status.values() and 'notrunning' not in zookeeper_status.values() and 'notinstalled' not in zookeeper_status.values() and 'standalone' not in zookeeper_status.values()): print zookeeper_status break elif (len(database_nodes) == 1 and 'notinstalled' not in zookeeper_status.values() and 'standalone' in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is already formed properly." break else: retries -= 1 if retries: continue print "Zookeepr leader/follower election has problems. Fix it and retry upgrade" print zookeeper_status exit(1)
def configure_test_env(contrail_fab_path='/opt/contrail/utils', test_dir='/contrail-test'): """ Configure test environment by creating sanity_params.ini and sanity_testbed.json files """ print "Configuring test environment" sys.path.insert(0, contrail_fab_path) from fabfile.testbeds import testbed from fabfile.utils.host import get_openstack_internal_vip, \ get_control_host_string, get_authserver_ip, get_admin_tenant_name, \ get_authserver_port, get_env_passwords, get_authserver_credentials, \ get_vcenter_ip, get_vcenter_port, get_vcenter_username, \ get_vcenter_password, get_vcenter_datacenter, get_vcenter_compute, \ get_authserver_protocol, get_region_name, get_contrail_internal_vip, \ get_openstack_external_vip, get_contrail_external_vip from fabfile.utils.multitenancy import get_mt_enable from fabfile.utils.interface import get_data_ip from fabfile.tasks.install import update_config_option, update_js_config cfgm_host = env.roledefs['cfgm'][0] auth_protocol = get_authserver_protocol() auth_server_ip = get_authserver_ip() auth_server_port = get_authserver_port() with settings(warn_only=True), hide('everything'): with lcd(contrail_fab_path): if local('git branch').succeeded: fab_revision = local('git log --format="%H" -n 1', capture=True) else: with settings(host_string=cfgm_host), hide('everything'): fab_revision = run( 'cat /opt/contrail/contrail_packages/VERSION') with lcd(test_dir): if local('git branch').succeeded: revision = local('git log --format="%H" -n 1', capture=True) else: with settings(host_string=cfgm_host), hide('everything'): revision = run( 'cat /opt/contrail/contrail_packages/VERSION') sanity_testbed_dict = { 'hosts': [], 'vgw': [], 'esxi_vms': [], 'vcenter_servers': [], 'hosts_ipmi': [], 'tor': [], } sample_ini_file = test_dir + '/' + 'sanity_params.ini.sample' with open(sample_ini_file, 'r') as fd_sample_ini: contents_sample_ini = fd_sample_ini.read() sanity_ini_templ = string.Template(contents_sample_ini) if env.get('orchestrator', 'openstack') != 'vcenter': with settings( host_string=env.roledefs['openstack'][0]), hide('everything'): openstack_host_name = run("hostname") with settings(host_string=env.roledefs['cfgm'][0]), hide('everything'): cfgm_host_name = run("hostname") control_host_names = [] for control_host in env.roledefs['control']: with settings(host_string=control_host), hide('everything'): host_name = run("hostname") control_host_names.append(host_name) cassandra_host_names = [] if 'database' in env.roledefs.keys(): for cassandra_host in env.roledefs['database']: with settings(host_string=cassandra_host), hide('everything'): host_name = run("hostname") cassandra_host_names.append(host_name) internal_vip = get_openstack_internal_vip() external_vip = get_openstack_external_vip() contrail_internal_vip = get_contrail_internal_vip() contrail_external_vip = get_contrail_external_vip() multi_role_test = False for host_string in env.roledefs['all']: if host_string in env.roledefs.get('test', []): for role in env.roledefs.iterkeys(): if role in ['test', 'all']: continue if host_string in env.roledefs.get(role, []): multi_role_test = True break if not multi_role_test: continue host_ip = host_string.split('@')[1] with settings(host_string=host_string), hide('everything'): host_name = run("hostname") host_dict = {} host_dict['ip'] = host_ip host_dict['data-ip'] = get_data_ip(host_string)[0] if host_dict['data-ip'] == host_string.split('@')[1]: host_dict['data-ip'] = get_data_ip(host_string)[0] host_dict['control-ip'] = get_control_host_string(host_string).split( 
'@')[1] host_dict['name'] = host_name host_dict['username'] = host_string.split('@')[0] host_dict['password'] = get_env_passwords(host_string) host_dict['roles'] = [] if host_string in env.roledefs['openstack']: role_dict = { 'type': 'openstack', 'params': { 'cfgm': cfgm_host_name } } host_dict['roles'].append(role_dict) if host_string in env.roledefs['cfgm']: role_dict = { 'type': 'cfgm', 'params': { 'collector': host_name, 'cassandra': ' '.join(cassandra_host_names) } } if env.get('orchestrator', 'openstack') != 'vcenter': role_dict['openstack'] = openstack_host_name host_dict['roles'].append(role_dict) if host_string in env.roledefs['control']: role_dict = { 'type': 'bgp', 'params': { 'collector': cfgm_host_name, 'cfgm': cfgm_host_name } } host_dict['roles'].append(role_dict) if 'database' in env.roledefs.keys( ) and host_string in env.roledefs['database']: role_dict = { 'type': 'database', 'params': { 'cassandra': ' '.join(cassandra_host_names) } } host_dict['roles'].append(role_dict) if host_string in env.roledefs['compute']: role_dict = { 'type': 'compute', 'params': { 'collector': cfgm_host_name, 'cfgm': cfgm_host_name } } role_dict['params']['bgp'] = [] if len(env.roledefs['control']) == 1: role_dict['params']['bgp'] = control_host_names else: for control_node in control_host_names: role_dict['params']['bgp'].append(control_node) # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))]) host_dict['roles'].append(role_dict) if 'collector' in env.roledefs.keys( ) and host_string in env.roledefs['collector']: role_dict = { 'type': 'collector', 'params': { 'cassandra': ' '.join(cassandra_host_names) } } host_dict['roles'].append(role_dict) if 'webui' in env.roledefs.keys( ) and host_string in env.roledefs['webui']: role_dict = {'type': 'webui', 'params': {'cfgm': cfgm_host_name}} host_dict['roles'].append(role_dict) sanity_testbed_dict['hosts'].append(host_dict) if env.has_key('vgw'): sanity_testbed_dict['vgw'].append(env.vgw) # Read ToR config sanity_tor_dict = {} if env.has_key('tor_agent'): sanity_testbed_dict['tor_agent'] = env.tor_agent # Read any tor-host config if env.has_key('tor_hosts'): sanity_testbed_dict['tor_hosts'] = env.tor_hosts if env.has_key('xmpp_auth_enable'): sanity_testbed_dict['xmpp_auth_enable'] = env.xmpp_auth_enable if env.has_key('xmpp_dns_auth_enable'): sanity_testbed_dict['xmpp_dns_auth_enable'] = env.xmpp_dns_auth_enable # Read any MX config (as physical_router ) if env.has_key('physical_routers'): sanity_testbed_dict['physical_routers'] = env.physical_routers esxi_hosts = getattr(testbed, 'esxi_hosts', None) if esxi_hosts: for esxi in esxi_hosts: host_dict = {} host_dict['ip'] = esxi_hosts[esxi]['ip'] host_dict['data-ip'] = host_dict['ip'] host_dict['control-ip'] = host_dict['ip'] host_dict['name'] = esxi host_dict['username'] = esxi_hosts[esxi]['username'] host_dict['password'] = esxi_hosts[esxi]['password'] #Its used for vcenter only mode provosioning for contrail-vm #Its not needed for vcenter_gateway mode, hence might not be there in testbed.py if 'contrail_vm' in esxi_hosts[esxi]: host_dict['contrail_vm'] = esxi_hosts[esxi]['contrail_vm'][ 'host'] host_dict['roles'] = [] sanity_testbed_dict['hosts'].append(host_dict) sanity_testbed_dict['esxi_vms'].append(host_dict) vcenter_servers = env.get('vcenter_servers') if vcenter_servers: for vcenter in vcenter_servers: host_dict = {} host_dict['server'] = vcenter_servers[vcenter]['server'] host_dict['port'] = vcenter_servers[vcenter]['port'] host_dict['username'] = 
vcenter_servers[vcenter]['username'] host_dict['password'] = vcenter_servers[vcenter]['password'] host_dict['datacenter'] = vcenter_servers[vcenter]['datacenter'] host_dict['auth'] = vcenter_servers[vcenter]['auth'] host_dict['cluster'] = vcenter_servers[vcenter]['cluster'] host_dict['dv_switch'] = vcenter_servers[vcenter]['dv_switch'][ 'dv_switch_name'] #Mostly we do not use the below info for vcenter sanity tests. #Its used for vcenter only mode provosioning for contrail-vm #Its not needed for vcenter_gateway mode, hence might not be there in testbed.py if 'dv_port_group' in vcenter_servers[vcenter]: host_dict['dv_port_group'] = vcenter_servers[vcenter][ 'dv_port_group']['dv_portgroup_name'] sanity_testbed_dict['vcenter_servers'].append(host_dict) #get other orchestrators (vcenter etc) info if any slave_orch = None if env.has_key('other_orchestrators'): sanity_testbed_dict['other_orchestrators'] = env.other_orchestrators for k, v in env.other_orchestrators.items(): if v['type'] == 'vcenter': slave_orch = 'vcenter' # get host ipmi list if env.has_key('hosts_ipmi'): sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi) if not getattr(env, 'test', None): env.test = {} # generate json file and copy to cfgm sanity_testbed_json = json.dumps(sanity_testbed_dict) stack_user = env.test.get('stack_user', os.getenv('STACK_USER') or '') stack_password = env.test.get('stack_password', os.getenv('STACK_PASSWORD') or '') stack_tenant = env.test.get('stack_tenant', os.getenv('STACK_TENANT') or '') tenant_isolation = env.test.get('tenant_isolation', os.getenv('TENANT_ISOLATION') or '') stop_on_fail = env.get('stop_on_fail', False) mail_to = env.test.get('mail_to', os.getenv('MAIL_TO') or '') log_scenario = env.get('log_scenario', 'Sanity') stack_region_name = get_region_name() admin_user, admin_password = get_authserver_credentials() admin_tenant = get_admin_tenant_name() # Few hardcoded variables for sanity environment # can be removed once we move to python3 and configparser stack_domain = env.get('stack_domain', 'default-domain') webserver_host = env.test.get('webserver_host', os.getenv('WEBSERVER_HOST') or '') webserver_user = env.test.get('webserver_user', os.getenv('WEBSERVER_USER') or '') webserver_password = env.test.get('webserver_password', os.getenv('WEBSERVER_PASSWORD') or '') webserver_log_path = env.test.get( 'webserver_log_path', os.getenv('WEBSERVER_LOG_PATH') or '/var/www/contrail-test-ci/logs/') webserver_report_path = env.test.get( 'webserver_report_path', os.getenv('WEBSERVER_REPORT_PATH') or '/var/www/contrail-test-ci/reports/') webroot = env.test.get('webroot', os.getenv('WEBROOT') or 'contrail-test-ci') mail_server = env.test.get('mail_server', os.getenv('MAIL_SERVER') or '') mail_port = env.test.get('mail_port', os.getenv('MAIL_PORT') or '25') fip_pool_name = env.test.get( 'fip_pool_name', os.getenv('FIP_POOL_NAME') or 'floating-ip-pool') public_virtual_network = env.test.get( 'public_virtual_network', os.getenv('PUBLIC_VIRTUAL_NETWORK') or 'public') public_tenant_name = env.test.get( 'public_tenant_name', os.getenv('PUBLIC_TENANT_NAME') or 'admin') fixture_cleanup = env.test.get('fixture_cleanup', os.getenv('FIXTURE_CLEANUP') or 'yes') generate_html_report = env.test.get( 'generate_html_report', os.getenv('GENERATE_HTML_REPORT') or 'True') keypair_name = env.test.get('keypair_name', os.getenv('KEYPAIR_NAME') or 'contrail_key') mail_sender = env.test.get( 'mail_sender', os.getenv('MAIL_SENDER') or '*****@*****.**') discovery_ip = env.test.get('discovery_ip', 
os.getenv('DISCOVERY_IP') or '') config_api_ip = env.test.get('config_api_ip', os.getenv('CONFIG_API_IP') or '') analytics_api_ip = env.test.get('analytics_api_ip', os.getenv('ANALYTICS_API_IP') or '') discovery_port = env.test.get('discovery_port', os.getenv('DISCOVERY_PORT') or '') config_api_port = env.test.get('config_api_port', os.getenv('CONFIG_API_PORT') or '') analytics_api_port = env.test.get('analytics_api_port', os.getenv('ANALYTICS_API_PORT') or '') control_port = env.test.get('control_port', os.getenv('CONTROL_PORT') or '') dns_port = env.test.get('dns_port', os.getenv('DNS_PORT') or '') agent_port = env.test.get('agent_port', os.getenv('AGENT_PORT') or '') user_isolation = env.test.get('user_isolation', bool(os.getenv('USER_ISOLATION') or True)) use_devicemanager_for_md5 = getattr(testbed, 'use_devicemanager_for_md5', False) orch = getattr(env, 'orchestrator', 'openstack') router_asn = getattr(testbed, 'router_asn', '') public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '') public_vn_subnet = getattr(testbed, 'public_vn_subnet', '') ext_routers = getattr(testbed, 'ext_routers', '') router_info = str(ext_routers) test_verify_on_setup = getattr(env, 'test_verify_on_setup', True) webui = getattr(testbed, 'webui', False) horizon = getattr(testbed, 'horizon', False) ui_config = getattr(testbed, 'ui_config', False) ui_browser = getattr(testbed, 'ui_browser', False) if not env.has_key('openstack'): env.openstack = {} if not env.has_key('cfgm'): env.cfgm = {} config_amqp_ip = env.openstack.get('amqp_host', '') if config_amqp_ip: config_amqp_ips = [config_amqp_ip] else: config_amqp_ips = [] # If amqp details are in env.cfgm as well, use that config_amqp_port = env.cfgm.get('amqp_port', '5672') config_amqp_ips = env.cfgm.get('amqp_hosts', config_amqp_ips) key_filename = env.get('key_filename', '') pubkey_filename = env.get('pubkey_filename', '') vcenter_dc = '' if orch == 'vcenter' or slave_orch == 'vcenter': public_tenant_name = 'vCenter' if env.has_key('vcenter_servers'): if env.vcenter_servers: for k in env.vcenter_servers: vcenter_dc = env.vcenter_servers[k]['datacenter'] sanity_params = sanity_ini_templ.safe_substitute({ '__testbed_json_file__': 'sanity_testbed.json', '__nova_keypair_name__': keypair_name, '__orch__': orch, '__admin_user__': admin_user, '__admin_password__': admin_password, '__admin_tenant__': admin_tenant, '__tenant_isolation__': tenant_isolation, '__stack_user__': stack_user, '__stack_password__': stack_password, '__auth_ip__': auth_server_ip, '__auth_port__': auth_server_port, '__auth_protocol__': auth_protocol, '__stack_region_name__': stack_region_name, '__stack_tenant__': stack_tenant, '__stack_domain__': stack_domain, '__multi_tenancy__': get_mt_enable(), '__address_family__': get_address_family(), '__log_scenario__': log_scenario, '__generate_html_report__': generate_html_report, '__fixture_cleanup__': fixture_cleanup, '__key_filename__': key_filename, '__pubkey_filename__': pubkey_filename, '__webserver__': webserver_host, '__webserver_user__': webserver_user, '__webserver_password__': webserver_password, '__webserver_log_dir__': webserver_log_path, '__webserver_report_dir__': webserver_report_path, '__webroot__': webroot, '__mail_server__': mail_server, '__mail_port__': mail_port, '__sender_mail_id__': mail_sender, '__receiver_mail_id__': mail_to, '__http_proxy__': env.get('http_proxy', ''), '__ui_browser__': ui_browser, '__ui_config__': ui_config, '__horizon__': horizon, '__webui__': webui, '__devstack__': False, '__public_vn_rtgt__': public_vn_rtgt, 
'__router_asn__': router_asn, '__router_name_ip_tuples__': router_info, '__public_vn_name__': fip_pool_name, '__public_virtual_network__': public_virtual_network, '__public_tenant_name__': public_tenant_name, '__public_vn_subnet__': public_vn_subnet, '__test_revision__': revision, '__fab_revision__': fab_revision, '__test_verify_on_setup__': test_verify_on_setup, '__stop_on_fail__': stop_on_fail, '__ha_setup__': getattr(testbed, 'ha_setup', ''), '__ipmi_username__': getattr(testbed, 'ipmi_username', ''), '__ipmi_password__': getattr(testbed, 'ipmi_password', ''), '__contrail_internal_vip__': contrail_internal_vip, '__contrail_external_vip__': contrail_external_vip, '__internal_vip__': internal_vip, '__external_vip__': external_vip, '__vcenter_dc__': vcenter_dc, '__vcenter_server__': get_vcenter_ip(), '__vcenter_port__': get_vcenter_port(), '__vcenter_username__': get_vcenter_username(), '__vcenter_password__': get_vcenter_password(), '__vcenter_datacenter__': get_vcenter_datacenter(), '__vcenter_compute__': get_vcenter_compute(), '__use_devicemanager_for_md5__': use_devicemanager_for_md5, '__discovery_port__': discovery_port, '__config_api_port__': config_api_port, '__analytics_api_port__': analytics_api_port, '__control_port__': control_port, '__dns_port__': dns_port, '__vrouter_agent_port__': agent_port, '__discovery_ip__': discovery_ip, '__config_api_ip__': config_api_ip, '__analytics_api_ip__': analytics_api_ip, '__user_isolation__': user_isolation, '__config_amqp_ips__': ','.join(config_amqp_ips), '__config_amqp_port__': config_amqp_port, }) ini_file = test_dir + '/' + 'sanity_params.ini' testbed_json_file = test_dir + '/' + 'sanity_testbed.json' with open(ini_file, 'w') as ini: ini.write(sanity_params) with open(testbed_json_file, 'w') as tb: tb.write(sanity_testbed_json) # Create /etc/contrail/openstackrc if not os.path.exists('/etc/contrail'): os.makedirs('/etc/contrail') with open('/etc/contrail/openstackrc', 'w') as rc: rc.write("export OS_USERNAME=%s\n" % admin_user) rc.write("export OS_PASSWORD=%s\n" % admin_password) rc.write("export OS_TENANT_NAME=%s\n" % admin_tenant) rc.write("export OS_REGION_NAME=%s\n" % stack_region_name) rc.write("export OS_AUTH_URL=%s://%s:%s/v2.0\n" % (auth_protocol, auth_server_ip, auth_server_port)) rc.write("export OS_NO_CACHE=1\n") # Write vnc_api_lib.ini - this is required for vnc_api to connect to keystone config = ConfigParser.ConfigParser() config.optionxform = str vnc_api_ini = '/etc/contrail/vnc_api_lib.ini' if os.path.exists(vnc_api_ini): config.read(vnc_api_ini) if 'auth' not in config.sections(): config.add_section('auth') config.set('auth', 'AUTHN_TYPE', 'keystone') config.set('auth', 'AUTHN_PROTOCOL', auth_protocol) config.set('auth', 'AUTHN_SERVER', auth_server_ip) config.set('auth', 'AUTHN_PORT', auth_server_port) config.set('auth', 'AUTHN_URL', '/v2.0/tokens') with open(vnc_api_ini, 'w') as f: config.write(f) # If webui = True, in testbed, setup webui for sanity if webui: update_config_option('openstack', '/etc/keystone/keystone.conf', 'token', 'expiration', '86400', 'keystone') update_js_config('openstack', '/etc/contrail/config.global.js', 'contrail-webui')
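# Minimal stand-alone sketch (not from the repo) of how the sanity_params.ini rendering
# above works: sanity_params.ini.sample is assumed to contain string.Template placeholders
# such as $__auth_ip__, and safe_substitute() leaves any placeholder it has no value for
# intact instead of raising KeyError. The sample content and values below are made up.
import string

sample = (
    "[Basic]\n"
    "authIp = $__auth_ip__\n"
    "authPort = $__auth_port__\n"
    "keypairName = $__nova_keypair_name__\n"
)
templ = string.Template(sample)
rendered = templ.safe_substitute({
    '__auth_ip__': '192.0.2.10',   # hypothetical values
    '__auth_port__': '5000',
})
# '$__nova_keypair_name__' survives unresolved because no value was supplied for it.
print(rendered)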
def configure_test_env(contrail_fab_path='/opt/contrail/utils', test_dir='/contrail-test'): """ Configure test environment by creating sanity_params.ini and sanity_testbed.json files """ print "Configuring test environment" sys.path.insert(0, contrail_fab_path) from fabfile.config import testbed from fabfile.utils.host import get_openstack_internal_vip, \ get_control_host_string, get_authserver_ip, get_admin_tenant_name, \ get_authserver_port, get_env_passwords, get_authserver_credentials, \ get_vcenter_ip, get_vcenter_port, get_vcenter_username, \ get_vcenter_password, get_vcenter_datacenter, get_vcenter_compute, \ get_authserver_protocol, get_region_name, get_contrail_internal_vip, \ get_openstack_external_vip, get_contrail_external_vip, \ get_apiserver_protocol, get_apiserver_certfile, get_apiserver_keyfile, \ get_apiserver_cafile, get_keystone_insecure_flag, \ get_apiserver_insecure_flag, get_keystone_certfile, get_keystone_keyfile, \ get_keystone_cafile, get_keystone_version from fabfile.utils.multitenancy import get_mt_enable from fabfile.utils.interface import get_data_ip from fabfile.tasks.install import update_config_option, update_js_config from fabfile.utils.fabos import get_as_sudo logger = contrail_logging.getLogger(__name__) def validate_and_copy_file(filename, source_host): with settings(host_string='%s' %(source_host), warn_only=True, abort_on_prompts=False): if exists(filename): filedir = os.path.dirname(filename) if not os.path.exists(filedir): os.makedirs(filedir) get_as_sudo(filename, filename) return filename return "" cfgm_host = env.roledefs['cfgm'][0] auth_protocol = get_authserver_protocol() try: auth_server_ip = get_authserver_ip() except Exception: auth_server_ip = None auth_server_port = get_authserver_port() api_auth_protocol = get_apiserver_protocol() if api_auth_protocol == 'https': api_certfile = validate_and_copy_file(get_apiserver_certfile(), cfgm_host) api_keyfile = validate_and_copy_file(get_apiserver_keyfile(), cfgm_host) api_cafile = validate_and_copy_file(get_apiserver_cafile(), cfgm_host) api_insecure_flag = get_apiserver_insecure_flag() else: api_certfile = "" api_keyfile = "" api_cafile = "" api_insecure_flag = True cert_dir = os.path.dirname(api_certfile) if auth_protocol == 'https': keystone_cafile = validate_and_copy_file(cert_dir + '/' +\ os.path.basename(get_keystone_cafile()), cfgm_host) keystone_certfile = validate_and_copy_file(cert_dir + '/' +\ os.path.basename(get_keystone_certfile()), cfgm_host) keystone_keyfile = keystone_certfile keystone_insecure_flag = istrue(os.getenv('OS_INSECURE', \ get_keystone_insecure_flag())) else: keystone_certfile = "" keystone_keyfile = "" keystone_cafile = "" keystone_insecure_flag = True with settings(warn_only=True), hide('everything'): with lcd(contrail_fab_path): if local('git branch').succeeded: fab_revision = local('git log --format="%H" -n 1', capture=True) else: with settings(host_string=cfgm_host), hide('everything'): fab_revision = run('cat /opt/contrail/contrail_packages/VERSION') with lcd(test_dir): if local('git branch').succeeded: revision = local('git log --format="%H" -n 1', capture=True) else: with settings(host_string=cfgm_host), hide('everything'): revision = run('cat /opt/contrail/contrail_packages/VERSION') sanity_testbed_dict = { 'hosts': [], 'vgw': [], 'esxi_vms':[], 'vcenter_servers':[], 'hosts_ipmi': [], 'tor':[], 'sriov':[], 'dpdk':[], 'ns_agilio_vrouter':[], } sample_ini_file = test_dir + '/' + 'sanity_params.ini.sample' with open(sample_ini_file, 'r') as fd_sample_ini: 
contents_sample_ini = fd_sample_ini.read() sanity_ini_templ = string.Template(contents_sample_ini) if not getattr(env, 'test', None): env.test={} containers = env.test.get('containers') traffic_data = env.test.get('traffic_data') ixia_linux_host_ip = get_value_of_key(traffic_data, 'ixia_linux_host_ip') ixia_host_ip = get_value_of_key(traffic_data, 'ixia_host_ip') spirent_linux_host_ip = get_value_of_key(traffic_data, 'spirent_linux_host_ip') ixia_linux_username = get_value_of_key(traffic_data, 'ixia_linux_username') ixia_linux_password = get_value_of_key(traffic_data, 'ixia_linux_password') spirent_linux_username = get_value_of_key(traffic_data, 'spirent_linux_username') spirent_linux_password = get_value_of_key(traffic_data, 'spirent_linux_password') if env.get('orchestrator', 'openstack') == 'openstack': with settings(host_string = env.roledefs['openstack'][0]), hide('everything'): openstack_host_name = run("hostname") with settings(host_string = env.roledefs['cfgm'][0]), hide('everything'): cfgm_host_name = run("hostname") control_host_names = [] for control_host in env.roledefs['control']: with settings(host_string = control_host), hide('everything'): host_name = run("hostname") control_host_names.append(host_name) cassandra_host_names = [] if 'database' in env.roledefs.keys(): for cassandra_host in env.roledefs['database']: with settings(host_string = cassandra_host), hide('everything'): host_name = run("hostname") cassandra_host_names.append(host_name) keystone_version = get_keystone_version() internal_vip = get_openstack_internal_vip() external_vip = get_openstack_external_vip() contrail_internal_vip = get_contrail_internal_vip() contrail_external_vip = get_contrail_external_vip() multi_role_test = False for host_string in env.roledefs['all']: if host_string in env.roledefs.get('test',[]): for role in env.roledefs.iterkeys(): if role in ['test','all']: continue if host_string in env.roledefs.get(role,[]): multi_role_test=True break if not multi_role_test: continue host_ip = host_string.split('@')[1] with settings(host_string = host_string), hide('everything'): try: host_name = run("hostname") host_fqname = run("hostname -f") except: logger.warn('Unable to login to %s'%host_ip) continue host_dict = {} host_dict['ip'] = host_ip host_dict['data-ip']= get_data_ip(host_string)[0] if host_dict['data-ip'] == host_string.split('@')[1]: host_dict['data-ip'] = get_data_ip(host_string)[0] host_dict['control-ip']= get_control_host_string(host_string).split('@')[1] host_dict['name'] = host_name host_dict['fqname'] = host_fqname host_dict['username'] = host_string.split('@')[0] host_dict['password'] =get_env_passwords(host_string) host_dict['roles'] = [] if env.get('qos', {}): if host_string in env.qos.keys(): role_dict = env.qos[host_string] host_dict['qos'] = role_dict if env.get('qos_niantic', {}): if host_string in env.qos_niantic.keys(): role_dict = env.qos_niantic[host_string] host_dict['qos_niantic'] = role_dict if host_string in env.roledefs['openstack']: role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}} role_dict['container'] = get_container_name(containers, host_string, 'openstack') host_dict['roles'].append(role_dict) if host_string in env.roledefs['cfgm']: role_dict = {'type': 'cfgm', 'params': {'collector': host_name, 'cassandra': ' '.join(cassandra_host_names)}} role_dict['container'] = get_container_name(containers, host_string, 'controller') if env.get('orchestrator', 'openstack') == 'openstack': role_dict['openstack'] = openstack_host_name 
host_dict['roles'].append(role_dict) if host_string in env.roledefs['control']: role_dict = {'type': 'bgp', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}} role_dict['container'] = get_container_name(containers, host_string, 'controller') host_dict['roles'].append(role_dict) if 'database' in env.roledefs.keys() and host_string in env.roledefs['database']: role_dict = { 'type': 'database', 'params': {'cassandra': ' '.join(cassandra_host_names)} } role_dict['container'] = get_container_name(containers, host_string, 'analyticsdb') host_dict['roles'].append(role_dict) if not env.roledefs.get('compute'): env.roledefs['compute'] = [] if host_string in env.roledefs['compute']: role_dict = {'type': 'compute', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}} role_dict['container'] = get_container_name(containers, host_string, 'agent') role_dict['params']['bgp'] = [] if len(env.roledefs['control']) == 1: role_dict['params']['bgp'] = control_host_names else: for control_node in control_host_names: role_dict['params']['bgp'].append(control_node) # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))]) host_dict['roles'].append(role_dict) if host_string in env.roledefs.get('lb',[]): role_dict = {'type': 'lb', 'params': {'lb': host_name}} role_dict['container'] = get_container_name(containers, host_string, 'lb') host_dict['roles'].append(role_dict) if 'collector' in env.roledefs.keys() and host_string in env.roledefs['collector']: role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} } role_dict['container'] = get_container_name(containers, host_string, 'analytics') host_dict['roles'].append(role_dict) if 'webui' in env.roledefs.keys() and host_string in env.roledefs['webui']: role_dict = { 'type': 'webui', 'params': {'cfgm': cfgm_host_name} } role_dict['container'] = get_container_name(containers, host_string, 'controller') host_dict['roles'].append(role_dict) # Kube managers if 'contrail-kubernetes' in env.roledefs.keys() and \ host_string in env.roledefs['contrail-kubernetes']: role_dict = { 'type': 'contrail-kubernetes', 'params': {} } role_dict['container'] = get_container_name(containers, host_string, 'contrail-kube-manager') host_dict['roles'].append(role_dict) sanity_testbed_dict['hosts'].append(host_dict) if env.has_key('vgw'): sanity_testbed_dict['vgw'].append(env.vgw) #get sriov info if env.has_key('sriov'): sanity_testbed_dict['sriov'].append(env.sriov) #get dpdk info if env.has_key('dpdk'): sanity_testbed_dict['dpdk'].append(env.dpdk) #get k8s info sanity_testbed_dict['kubernetes'] = env.get('kubernetes', {}) #get ns_agilio_vrouter info if env.has_key('ns_agilio_vrouter'): sanity_testbed_dict['ns_agilio_vrouter'].append(env.ns_agilio_vrouter) # Read ToR config sanity_tor_dict = {} if env.has_key('tor_agent'): sanity_testbed_dict['tor_agent'] = env.tor_agent # Read any tor-host config if env.has_key('tor_hosts'): sanity_testbed_dict['tor_hosts'] = env.tor_hosts if env.has_key('xmpp_auth_enable'): sanity_testbed_dict['xmpp_auth_enable'] = env.xmpp_auth_enable if env.has_key('xmpp_dns_auth_enable'): sanity_testbed_dict['xmpp_dns_auth_enable'] = env.xmpp_dns_auth_enable if env.has_key('metadata_ssl_enable'): sanity_testbed_dict['metadata_ssl_enable'] = env.metadata_ssl_enable if env.has_key('dm_mx'): sanity_testbed_dict['dm_mx'] = env.dm_mx # Read any MX config (as physical_router ) if env.has_key('physical_routers'): sanity_testbed_dict['physical_routers'] = env.physical_routers 
esxi_hosts = getattr(testbed, 'esxi_hosts', None) if esxi_hosts: for esxi in esxi_hosts: host_dict = {} host_dict['ip'] = esxi_hosts[esxi]['ip'] host_dict['data-ip'] = host_dict['ip'] host_dict['control-ip'] = host_dict['ip'] host_dict['name'] = esxi host_dict['username'] = esxi_hosts[esxi]['username'] host_dict['password'] = esxi_hosts[esxi]['password'] #Its used for vcenter only mode provosioning for contrail-vm #Its not needed for vcenter_gateway mode, hence might not be there in testbed.py if 'contrail_vm' in esxi_hosts[esxi]: host_dict['contrail_vm'] = esxi_hosts[esxi]['contrail_vm']['host'] host_dict['roles'] = [] host_dict['type'] = 'esxi' sanity_testbed_dict['hosts'].append(host_dict) sanity_testbed_dict['esxi_vms'].append(host_dict) vcenter_servers = env.get('vcenter_servers') if vcenter_servers: for vcenter in vcenter_servers: sanity_testbed_dict['vcenter_servers'].append(vcenter_servers[vcenter]) orch = getattr(env, 'orchestrator', 'openstack') deployer = getattr(env, 'deployer', 'openstack') #get other orchestrators (vcenter etc) info if any slave_orch = None if env.has_key('other_orchestrators'): sanity_testbed_dict['other_orchestrators'] = env.other_orchestrators for k,v in env.other_orchestrators.items(): if v['type'] == 'vcenter': slave_orch = 'vcenter' # get host ipmi list if env.has_key('hosts_ipmi'): sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi) # Setting slave orch to k8s when key present if env.has_key('kubernetes'): if sanity_testbed_dict['kubernetes']['mode'] == 'nested': slave_orch = 'kubernetes' # generate json file and copy to cfgm sanity_testbed_json = json.dumps(sanity_testbed_dict) stack_user = os.getenv('STACK_USER', env.get('stack_user', env.test.get('stack_user', ''))) stack_password = os.getenv('STACK_PASSWORD', env.test.get('stack_password','')) stack_tenant = os.getenv('STACK_TENANT', env.get('stack_tenant', env.test.get('stack_tenant', ''))) stack_domain = os.getenv('STACK_DOMAIN', env.get('stack_domain', env.test.get('stack_domain', ''))) use_project_scoped_token = env.test.get('use_project_scoped_token', '') if not env.has_key('domain_isolation'): env.domain_isolation = False if not env.has_key('cloud_admin_domain'): env.cloud_admin_domain = 'Default' if not env.has_key('cloud_admin_user'): env.cloud_admin_user = '******' if not env.has_key('cloud_admin_password'): env.cloud_admin_password = env.get('openstack_admin_password') domain_isolation = os.getenv('DOMAIN_ISOLATION', env.test.get('domain_isolation', env.domain_isolation)) cloud_admin_domain = os.getenv('CLOUD_ADMIN_DOMAIN', env.test.get('cloud_admin_domain', env.cloud_admin_domain)) cloud_admin_user = os.getenv('CLOUD_ADMIN_USER', env.test.get('cloud_admin_user', env.cloud_admin_user)) cloud_admin_password = os.getenv('CLOUD_ADMIN_PASSWORD', env.test.get('cloud_admin_password', env.cloud_admin_password)) tenant_isolation = os.getenv('TENANT_ISOLATION', env.test.get('tenant_isolation', '')) stop_on_fail = env.get('stop_on_fail', False) mail_to = os.getenv('MAIL_TO', env.test.get('mail_to', '')) log_scenario = env.get('log_scenario', 'Sanity') stack_region_name = get_region_name() admin_user, admin_password = get_authserver_credentials() if orch == 'kubernetes': admin_tenant = 'default' else: admin_tenant = get_admin_tenant_name() # Few hardcoded variables for sanity environment # can be removed once we move to python3 and configparser webserver_host = os.getenv('WEBSERVER_HOST', env.test.get('webserver_host','')) webserver_user = os.getenv('WEBSERVER_USER', 
env.test.get('webserver_user', '')) webserver_password = os.getenv('WEBSERVER_PASSWORD', env.test.get('webserver_password', '')) webserver_log_path = os.getenv('WEBSERVER_LOG_PATH', env.test.get('webserver_log_path', '/var/www/contrail-test-ci/logs/')) webserver_report_path = os.getenv('WEBSERVER_REPORT_PATH', env.test.get('webserver_report_path', '/var/www/contrail-test-ci/reports/')) webroot = os.getenv('WEBROOT', env.test.get('webroot', 'contrail-test-ci')) mail_server = os.getenv('MAIL_SERVER', env.test.get('mail_server', '')) mail_port = os.getenv('MAIL_PORT', env.test.get('mail_port', '25')) fip_pool_name = os.getenv('FIP_POOL_NAME', env.test.get('fip_pool_name', 'floating-ip-pool')) public_virtual_network = os.getenv('PUBLIC_VIRTUAL_NETWORK', env.test.get('public_virtual_network', 'public')) public_tenant_name = os.getenv('PUBLIC_TENANT_NAME', env.test.get('public_tenant_name', 'admin')) fixture_cleanup = os.getenv('FIXTURE_CLEANUP', env.test.get('fixture_cleanup', 'yes')) generate_html_report = os.getenv('GENERATE_HTML_REPORT', env.test.get('generate_html_report', 'True')) keypair_name = os.getenv('KEYPAIR_NAME', env.test.get('keypair_name', 'contrail_key')) mail_sender = os.getenv('MAIL_SENDER', env.test.get('mail_sender', '*****@*****.**')) discovery_ip = os.getenv('DISCOVERY_IP', env.test.get('discovery_ip', '')) config_api_ip = os.getenv('CONFIG_API_IP', env.test.get('config_api_ip', '')) analytics_api_ip = os.getenv('ANALYTICS_API_IP', env.test.get('analytics_api_ip', '')) discovery_port = os.getenv('DISCOVERY_PORT', env.test.get('discovery_port', '')) config_api_port = os.getenv('CONFIG_API_PORT', env.test.get('config_api_port', '')) analytics_api_port = os.getenv('ANALYTICS_API_PORT', env.test.get('analytics_api_port', '')) control_port = os.getenv('CONTROL_PORT', env.test.get('control_port', '')) dns_port = os.getenv('DNS_PORT', env.test.get('dns_port', '')) agent_port = os.getenv('AGENT_PORT', env.test.get('agent_port', '')) user_isolation = os.getenv('USER_ISOLATION', env.test.get('user_isolation', False if stack_user else True)) neutron_username = os.getenv('NEUTRON_USERNAME', env.test.get('neutron_username', None)) availability_zone = os.getenv('AVAILABILITY_ZONE', env.test.get('availability_zone', None)) ci_flavor = os.getenv('CI_FLAVOR', env.test.get('ci_flavor', None)) kube_config_file = env.test.get('kube_config_file', '/etc/kubernetes/admin.conf') openshift_src_config_file = env.test.get('openshift_src_config_file', '/root/.kube/config') use_devicemanager_for_md5 = getattr(testbed, 'use_devicemanager_for_md5', False) router_asn = getattr(testbed, 'router_asn', '') public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '') public_vn_subnet = getattr(testbed, 'public_vn_subnet', '') ext_routers = getattr(testbed, 'ext_routers', '') router_info = str(ext_routers) fabric_gw = getattr(testbed, 'fabric_gw', '') fabric_gw_info = str(fabric_gw) test_verify_on_setup = getattr(env, 'test_verify_on_setup', True) webui = getattr(testbed, 'webui', False) horizon = getattr(testbed, 'horizon', False) ui_config = getattr(testbed, 'ui_config', False) ui_browser = getattr(testbed, 'ui_browser', False) if not env.has_key('openstack'): env.openstack = {} if not env.has_key('cfgm'): env.cfgm = {} config_amqp_ip = env.openstack.get('amqp_host', '') if config_amqp_ip: config_amqp_ips = [config_amqp_ip] else: config_amqp_ips = [] # If amqp details are in env.cfgm as well, use that config_amqp_port = env.cfgm.get('amqp_port', '5672') config_amqp_ips = env.cfgm.get('amqp_hosts', 
config_amqp_ips) key_filename = env.get('key_filename', '') pubkey_filename = env.get('pubkey_filename', '') vcenter_dc = '' if orch == 'vcenter' or slave_orch== 'vcenter': public_tenant_name='vCenter' if env.has_key('vcenter_servers'): if env.vcenter_servers: for vc in env.vcenter_servers: for dc in env.vcenter_servers[vc]['datacenters']: vcenter_dc = dc #global controller gc_host_mgmt = getattr(testbed, 'gc_host_mgmt', '') gc_host_control_data = getattr(testbed, 'gc_host_control_data', '') gc_user_name = getattr(testbed, 'gc_user_name', '') gc_user_pwd = getattr(testbed, 'gc_user_pwd', '') keystone_password = getattr(testbed, 'keystone_password', '') sanity_params = sanity_ini_templ.safe_substitute( {'__testbed_json_file__' : 'sanity_testbed.json', '__keystone_version__' : keystone_version, '__use_project_scoped_token__': use_project_scoped_token, '__nova_keypair_name__' : keypair_name, '__orch__' : orch, '__deployer__' : deployer, '__admin_user__' : admin_user, '__admin_password__' : admin_password, '__admin_tenant__' : admin_tenant, '__domain_isolation__' : domain_isolation, '__cloud_admin_domain__' : cloud_admin_domain, '__cloud_admin_user__' : cloud_admin_user, '__cloud_admin_password__': cloud_admin_password, '__tenant_isolation__' : tenant_isolation, '__stack_user__' : stack_user, '__stack_password__' : stack_password, '__auth_ip__' : auth_server_ip, '__auth_port__' : auth_server_port, '__auth_protocol__' : auth_protocol, '__stack_region_name__' : stack_region_name, '__stack_tenant__' : stack_tenant, '__stack_domain__' : stack_domain, '__multi_tenancy__' : get_mt_enable(), '__address_family__' : get_address_family(), '__log_scenario__' : log_scenario, '__generate_html_report__': generate_html_report, '__fixture_cleanup__' : fixture_cleanup, '__key_filename__' : key_filename, '__pubkey_filename__' : pubkey_filename, '__webserver__' : webserver_host, '__webserver_user__' : webserver_user, '__webserver_password__' : webserver_password, '__webserver_log_dir__' : webserver_log_path, '__webserver_report_dir__': webserver_report_path, '__webroot__' : webroot, '__mail_server__' : mail_server, '__mail_port__' : mail_port, '__sender_mail_id__' : mail_sender, '__receiver_mail_id__' : mail_to, '__http_proxy__' : env.get('http_proxy', ''), '__ui_browser__' : ui_browser, '__ui_config__' : ui_config, '__horizon__' : horizon, '__webui__' : webui, '__devstack__' : False, '__public_vn_rtgt__' : public_vn_rtgt, '__router_asn__' : router_asn, '__router_name_ip_tuples__': router_info, '__fabric_gw_name_ip_tuple__': fabric_gw_info, '__public_vn_name__' : fip_pool_name, '__public_virtual_network__':public_virtual_network, '__public_tenant_name__' :public_tenant_name, '__public_vn_subnet__' : public_vn_subnet, '__test_revision__' : revision, '__fab_revision__' : fab_revision, '__test_verify_on_setup__': test_verify_on_setup, '__stop_on_fail__' : stop_on_fail, '__ha_setup__' : getattr(testbed, 'ha_setup', ''), '__ipmi_username__' : getattr(testbed, 'ipmi_username', ''), '__ipmi_password__' : getattr(testbed, 'ipmi_password', ''), '__contrail_internal_vip__' : contrail_internal_vip, '__contrail_external_vip__' : contrail_external_vip, '__internal_vip__' : internal_vip, '__external_vip__' : external_vip, '__vcenter_dc__' : vcenter_dc, '__vcenter_server__' : get_vcenter_ip(), '__vcenter_port__' : get_vcenter_port(), '__vcenter_username__' : get_vcenter_username(), '__vcenter_password__' : get_vcenter_password(), '__vcenter_datacenter__' : get_vcenter_datacenter(), '__vcenter_compute__' : 
get_vcenter_compute(), '__use_devicemanager_for_md5__' : use_devicemanager_for_md5, '__discovery_port__' : discovery_port, '__config_api_port__' : config_api_port, '__analytics_api_port__' : analytics_api_port, '__control_port__' : control_port, '__dns_port__' : dns_port, '__vrouter_agent_port__' : agent_port, '__discovery_ip__' : discovery_ip, '__config_api_ip__' : config_api_ip, '__analytics_api_ip__' : analytics_api_ip, '__user_isolation__' : user_isolation, '__neutron_username__' : neutron_username, '__availability_zone__' : availability_zone, '__ci_flavor__' : ci_flavor, '__config_amqp_ips__' : ','.join(config_amqp_ips), '__config_amqp_port__' : config_amqp_port, '__api_auth_protocol__' : api_auth_protocol, '__api_certfile__' : api_certfile, '__api_keyfile__' : api_keyfile, '__api_cafile__' : api_cafile, '__api_insecure_flag__' : api_insecure_flag, '__keystone_certfile__' : keystone_certfile, '__keystone_keyfile__' : keystone_keyfile, '__keystone_cafile__' : keystone_cafile, '__keystone_insecure_flag__': keystone_insecure_flag, '__gc_host_mgmt__' : gc_host_mgmt, '__gc_host_control_data__': gc_host_control_data, '__gc_user_name__' : gc_user_name, '__gc_user_pwd__' : gc_user_pwd, '__keystone_password__' : keystone_password, '__slave_orch__' : slave_orch, '__ixia_linux_host_ip__' : ixia_linux_host_ip, '__ixia_host_ip__' : ixia_host_ip, '__spirent_linux_host_ip__': spirent_linux_host_ip, '__ixia_linux_username__' : ixia_linux_username, '__ixia_linux_password__' : ixia_linux_password, '__spirent_linux_username__': spirent_linux_username, '__spirent_linux_password__': spirent_linux_password, }) ini_file = test_dir + '/' + 'sanity_params.ini' testbed_json_file = test_dir + '/' + 'sanity_testbed.json' with open(ini_file, 'w') as ini: ini.write(sanity_params) with open(testbed_json_file,'w') as tb: tb.write(sanity_testbed_json) # Create /etc/contrail/openstackrc if not os.path.exists('/etc/contrail'): os.makedirs('/etc/contrail') keycertbundle = None if keystone_cafile and keystone_keyfile and keystone_certfile: bundle = '/tmp/keystonecertbundle.pem' certs = [keystone_certfile, keystone_keyfile, keystone_cafile] keycertbundle = utils.getCertKeyCaBundle(bundle, certs) with open('/etc/contrail/openstackrc','w') as rc: rc.write("export OS_USERNAME=%s\n" % admin_user) rc.write("export OS_PASSWORD=%s\n" % admin_password) rc.write("export OS_TENANT_NAME=%s\n" % admin_tenant) rc.write("export OS_REGION_NAME=%s\n" % stack_region_name) rc.write("export OS_AUTH_URL=%s://%s:%s/v2.0\n" % (auth_protocol, auth_server_ip, auth_server_port)) rc.write("export OS_CACERT=%s\n" % keycertbundle) rc.write("export OS_CERT=%s\n" % keystone_certfile) rc.write("export OS_KEY=%s\n" % keystone_keyfile) rc.write("export OS_INSECURE=%s\n" % keystone_insecure_flag) rc.write("export OS_NO_CACHE=1\n") # Write vnc_api_lib.ini - this is required for vnc_api to connect to keystone config = ConfigParser.ConfigParser() config.optionxform = str vnc_api_ini = '/etc/contrail/vnc_api_lib.ini' if os.path.exists(vnc_api_ini): config.read(vnc_api_ini) if 'auth' not in config.sections(): config.add_section('auth') config.set('auth','AUTHN_TYPE', 'keystone') config.set('auth','AUTHN_PROTOCOL', auth_protocol) config.set('auth','AUTHN_SERVER', auth_server_ip) config.set('auth','AUTHN_PORT', auth_server_port) if keystone_version == 'v3': config.set('auth','AUTHN_URL', '/v3/auth/tokens') else: config.set('auth','AUTHN_URL', '/v2.0/tokens') if api_auth_protocol == 'https': if 'global' not in config.sections(): config.add_section('global') 
config.set('global','certfile', api_certfile) config.set('global','cafile', api_cafile) config.set('global','keyfile', api_keyfile) config.set('global','insecure',api_insecure_flag) if auth_protocol == 'https': if 'auth' not in config.sections(): config.add_section('auth') config.set('auth','certfile', keystone_certfile) config.set('auth','cafile', keystone_cafile) config.set('auth','keyfile', keystone_keyfile) config.set('auth','insecure', keystone_insecure_flag) with open(vnc_api_ini,'w') as f: config.write(f) # Get kube config file to the testrunner node if orch == 'kubernetes' or slave_orch == 'kubernetes': if not os.path.exists(kube_config_file): dir_name = os.path.dirname(kube_config_file) if not os.path.exists(dir_name): os.makedirs(dir_name) with settings(host_string = env.kubernetes['master']): if deployer == 'openshift' : get(openshift_src_config_file, kube_config_file) else: get(kube_config_file, kube_config_file) # If webui = True, in testbed, setup webui for sanity if webui: sku = get_build_sku(cfgm_host) update_config_option('openstack', '/etc/keystone/keystone.conf', 'token', 'expiration', '86400','keystone', sku) update_js_config('webui', '/etc/contrail/config.global.js', 'contrail-webui', container=is_container_env)
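# Stand-alone sketch of the vnc_api_lib.ini handling shown above; the path and values in
# the usage line are placeholders, not the deployment's real ones. Because the file is
# read first when it already exists, repeated runs only update the keys that are set here.
try:
    import ConfigParser as configparser   # Python 2, as used by this fabfile
except ImportError:
    import configparser                   # Python 3 fallback
import os

def write_vnc_api_ini(path, protocol, server, port, keystone_v3=False):
    config = configparser.ConfigParser()
    config.optionxform = str               # keep option names case-sensitive
    if os.path.exists(path):
        config.read(path)
    if 'auth' not in config.sections():
        config.add_section('auth')
    config.set('auth', 'AUTHN_TYPE', 'keystone')
    config.set('auth', 'AUTHN_PROTOCOL', protocol)
    config.set('auth', 'AUTHN_SERVER', server)
    config.set('auth', 'AUTHN_PORT', str(port))
    config.set('auth', 'AUTHN_URL',
               '/v3/auth/tokens' if keystone_v3 else '/v2.0/tokens')
    with open(path, 'w') as f:
        config.write(f)

# write_vnc_api_ini('/tmp/vnc_api_lib.ini', 'http', '192.0.2.10', 5000)  # example values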
def create_vm(host, vm_template):
    with settings(host_string=host, password=get_env_passwords(host)):
        vm = VMLauncher(host, vm_template)
        vmi = vm.launch()
        return vmi
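# Hypothetical convenience wrapper (not in the original file) around create_vm() above:
# it launches the same template on every host of a role and collects the returned
# interfaces keyed by host. env comes from the Fabric imports this fabfile already uses;
# the role name in the usage comment and the vm_template contents are assumptions, since
# the template's structure is defined by VMLauncher, not here.
def create_vms_for_role(role, vm_template):
    interfaces = {}
    for host in env.roledefs.get(role, []):
        interfaces[host] = create_vm(host, vm_template)
    return interfaces

# Example (hypothetical role name):
# vmis = create_vms_for_role('compute', vm_template)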
def configure_test_env(contrail_fab_path='/opt/contrail/utils', test_dir='/contrail-test'): """ Configure test environment by creating sanity_params.ini and sanity_testbed.json files """ print "Configuring test environment" sys.path.insert(0, contrail_fab_path) from fabfile.testbeds import testbed from fabfile.utils.host import get_openstack_internal_vip, \ get_control_host_string, get_authserver_ip, get_admin_tenant_name, \ get_authserver_port, get_env_passwords, get_authserver_credentials, \ get_vcenter_ip, get_vcenter_port, get_vcenter_username, \ get_vcenter_password, get_vcenter_datacenter, get_vcenter_compute, \ get_authserver_protocol, get_region_name, get_contrail_internal_vip, \ get_openstack_external_vip, get_contrail_external_vip from fabfile.utils.multitenancy import get_mt_enable from fabfile.utils.interface import get_data_ip from fabfile.tasks.install import update_config_option, update_js_config cfgm_host = env.roledefs['cfgm'][0] auth_protocol = get_authserver_protocol() auth_server_ip = get_authserver_ip() auth_server_port = get_authserver_port() with settings(warn_only=True), hide('everything'): with lcd(contrail_fab_path): if local('git branch').succeeded: fab_revision = local('git log --format="%H" -n 1', capture=True) else: with settings(host_string=cfgm_host), hide('everything'): fab_revision = run('cat /opt/contrail/contrail_packages/VERSION') with lcd(test_dir): if local('git branch').succeeded: revision = local('git log --format="%H" -n 1', capture=True) else: with settings(host_string=cfgm_host), hide('everything'): revision = run('cat /opt/contrail/contrail_packages/VERSION') sanity_testbed_dict = { 'hosts': [], 'vgw': [], 'esxi_vms':[], 'vcenter_servers':[], 'hosts_ipmi': [], 'tor':[], } sample_ini_file = test_dir + '/' + 'sanity_params.ini.sample' with open(sample_ini_file, 'r') as fd_sample_ini: contents_sample_ini = fd_sample_ini.read() sanity_ini_templ = string.Template(contents_sample_ini) if env.get('orchestrator', 'openstack') != 'vcenter': with settings(host_string = env.roledefs['openstack'][0]), hide('everything'): openstack_host_name = run("hostname") with settings(host_string = env.roledefs['cfgm'][0]), hide('everything'): cfgm_host_name = run("hostname") control_host_names = [] for control_host in env.roledefs['control']: with settings(host_string = control_host), hide('everything'): host_name = run("hostname") control_host_names.append(host_name) cassandra_host_names = [] if 'database' in env.roledefs.keys(): for cassandra_host in env.roledefs['database']: with settings(host_string = cassandra_host), hide('everything'): host_name = run("hostname") cassandra_host_names.append(host_name) internal_vip = get_openstack_internal_vip() external_vip = get_openstack_external_vip() contrail_internal_vip = get_contrail_internal_vip() contrail_external_vip = get_contrail_external_vip() multi_role_test = False for host_string in env.roledefs['all']: if host_string in env.roledefs.get('test',[]): for role in env.roledefs.iterkeys(): if role in ['test','all']: continue if host_string in env.roledefs.get(role,[]): multi_role_test=True break if not multi_role_test: continue host_ip = host_string.split('@')[1] with settings(host_string = host_string), hide('everything'): host_name = run("hostname") host_dict = {} host_dict['ip'] = host_ip host_dict['data-ip']= get_data_ip(host_string)[0] if host_dict['data-ip'] == host_string.split('@')[1]: host_dict['data-ip'] = get_data_ip(host_string)[0] host_dict['control-ip']= get_control_host_string(host_string).split('@')[1] 
host_dict['name'] = host_name host_dict['username'] = host_string.split('@')[0] host_dict['password'] =get_env_passwords(host_string) host_dict['roles'] = [] if host_string in env.roledefs['openstack']: role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}} host_dict['roles'].append(role_dict) if host_string in env.roledefs['cfgm']: role_dict = {'type': 'cfgm', 'params': {'collector': host_name, 'cassandra': ' '.join(cassandra_host_names)}} if env.get('orchestrator', 'openstack') != 'vcenter': role_dict['openstack'] = openstack_host_name host_dict['roles'].append(role_dict) if host_string in env.roledefs['control']: role_dict = {'type': 'bgp', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}} host_dict['roles'].append(role_dict) if 'database' in env.roledefs.keys() and host_string in env.roledefs['database']: role_dict = { 'type': 'database', 'params': {'cassandra': ' '.join(cassandra_host_names)} } host_dict['roles'].append(role_dict) if host_string in env.roledefs['compute']: role_dict = {'type': 'compute', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}} role_dict['params']['bgp'] = [] if len(env.roledefs['control']) == 1: role_dict['params']['bgp'] = control_host_names else: for control_node in control_host_names: role_dict['params']['bgp'].append(control_node) # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))]) host_dict['roles'].append(role_dict) if 'collector' in env.roledefs.keys() and host_string in env.roledefs['collector']: role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} } host_dict['roles'].append(role_dict) if 'webui' in env.roledefs.keys() and host_string in env.roledefs['webui']: role_dict = { 'type': 'webui', 'params': {'cfgm': cfgm_host_name} } host_dict['roles'].append(role_dict) sanity_testbed_dict['hosts'].append(host_dict) if env.has_key('vgw'): sanity_testbed_dict['vgw'].append(env.vgw) # Read ToR config sanity_tor_dict = {} if env.has_key('tor_agent'): sanity_testbed_dict['tor_agent'] = env.tor_agent # Read any tor-host config if env.has_key('tor_hosts'): sanity_testbed_dict['tor_hosts'] = env.tor_hosts # Read any MX config (as physical_router ) if env.has_key('physical_routers'): sanity_testbed_dict['physical_routers'] = env.physical_routers esxi_hosts = getattr(testbed, 'esxi_hosts', None) if esxi_hosts: for esxi in esxi_hosts: host_dict = {} host_dict['ip'] = esxi_hosts[esxi]['ip'] host_dict['data-ip'] = host_dict['ip'] host_dict['control-ip'] = host_dict['ip'] host_dict['name'] = esxi host_dict['username'] = esxi_hosts[esxi]['username'] host_dict['password'] = esxi_hosts[esxi]['password'] host_dict['contrail_vm'] = esxi_hosts[esxi]['contrail_vm']['host'] host_dict['roles'] = [] sanity_testbed_dict['hosts'].append(host_dict) sanity_testbed_dict['esxi_vms'].append(host_dict) vcenter_servers = env.get('vcenter_servers') if vcenter_servers: for vcenter in vcenter_servers: host_dict = {} host_dict['server'] = vcenter_servers[vcenter]['server'] host_dict['port'] = vcenter_servers[vcenter]['port'] host_dict['username'] = vcenter_servers[vcenter]['username'] host_dict['password'] = vcenter_servers[vcenter]['password'] host_dict['datacenter'] = vcenter_servers[vcenter]['datacenter'] host_dict['auth'] = vcenter_servers[vcenter]['auth'] host_dict['cluster'] = vcenter_servers[vcenter]['cluster'] host_dict['dv_switch'] = vcenter_servers[vcenter]['dv_switch']['dv_switch_name'] host_dict['dv_switch'] = 
vcenter_servers[vcenter]['dv_port_group']['dv_portgroup_name'] sanity_testbed_dict['vcenter_servers'].append(host_dict) # get host ipmi list if env.has_key('hosts_ipmi'): sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi) if not getattr(env, 'test', None): env.test={} # generate json file and copy to cfgm sanity_testbed_json = json.dumps(sanity_testbed_dict) stack_user = env.test.get('stack_user', os.getenv('STACK_USER') or '') stack_password = env.test.get('stack_password', os.getenv('STACK_PASSWORD') or '') stack_tenant = env.test.get('stack_tenant', os.getenv('STACK_TENANT') or '') tenant_isolation = env.test.get('tenant_isolation', os.getenv('TENANT_ISOLATION') or '') stop_on_fail = env.get('stop_on_fail', False) mail_to = env.test.get('mail_to', os.getenv('MAIL_TO') or '') log_scenario = env.get('log_scenario', 'Sanity') stack_region_name = get_region_name() admin_user, admin_password = get_authserver_credentials() admin_tenant = get_admin_tenant_name() # Few hardcoded variables for sanity environment # can be removed once we move to python3 and configparser stack_domain = env.get('stack_domain', 'default-domain') webserver_host = env.test.get('webserver_host', os.getenv('WEBSERVER_HOST') or '') webserver_user = env.test.get('webserver_user', os.getenv('WEBSERVER_USER') or '') webserver_password = env.test.get('webserver_password', os.getenv('WEBSERVER_PASSWORD') or '') webserver_log_path = env.test.get('webserver_log_path', os.getenv('WEBSERVER_LOG_PATH') or '/var/www/contrail-test-ci/logs/') webserver_report_path = env.test.get('webserver_report_path', os.getenv('WEBSERVER_REPORT_PATH') or '/var/www/contrail-test-ci/reports/') webroot = env.test.get('webroot', os.getenv('WEBROOT') or 'contrail-test-ci') mail_server = env.test.get('mail_server', os.getenv('MAIL_SERVER') or '') mail_port = env.test.get('mail_port', os.getenv('MAIL_PORT') or '25') fip_pool_name = env.test.get('fip_pool_name', os.getenv('FIP_POOL_NAME') or 'floating-ip-pool') public_virtual_network=env.test.get('public_virtual_network', os.getenv('PUBLIC_VIRTUAL_NETWORK') or 'public') public_tenant_name=env.test.get('public_tenant_name', os.getenv('PUBLIC_TENANT_NAME') or 'admin') fixture_cleanup = env.test.get('fixture_cleanup', os.getenv('FIXTURE_CLEANUP') or 'yes') generate_html_report = env.test.get('generate_html_report', os.getenv('GENERATE_HTML_REPORT') or 'True') keypair_name = env.test.get('keypair_name', os.getenv('KEYPAIR_NAME') or 'contrail_key') mail_sender = env.test.get('mail_sender', os.getenv('MAIL_SENDER') or '*****@*****.**') discovery_ip = env.test.get('discovery_ip', os.getenv('DISCOVERY_IP') or '') config_api_ip = env.test.get('config_api_ip', os.getenv('CONFIG_API_IP') or '') analytics_api_ip = env.test.get('analytics_api_ip', os.getenv('ANALYTICS_API_IP') or '') discovery_port = env.test.get('discovery_port', os.getenv('DISCOVERY_PORT') or '') config_api_port = env.test.get('config_api_port', os.getenv('CONFIG_API_PORT') or '') analytics_api_port = env.test.get('analytics_api_port', os.getenv('ANALYTICS_API_PORT') or '') control_port = env.test.get('control_port', os.getenv('CONTROL_PORT') or '') dns_port = env.test.get('dns_port', os.getenv('DNS_PORT') or '') agent_port = env.test.get('agent_port', os.getenv('AGENT_PORT') or '') user_isolation = env.test.get('user_isolation', bool(os.getenv('USER_ISOLATION') or True)) use_devicemanager_for_md5 = getattr(testbed, 'use_devicemanager_for_md5', False) orch = getattr(env, 'orchestrator', 'openstack') router_asn = getattr(testbed, 'router_asn', 
'') public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '') public_vn_subnet = getattr(testbed, 'public_vn_subnet', '') ext_routers = getattr(testbed, 'ext_routers', '') router_info = str(ext_routers) test_verify_on_setup = getattr(env, 'test_verify_on_setup', True) webui = getattr(testbed, 'webui', False) horizon = getattr(testbed, 'horizon', False) ui_config = getattr(testbed, 'ui_config', False) ui_browser = getattr(testbed, 'ui_browser', False) key_filename = env.get('key_filename', '') pubkey_filename = env.get('pubkey_filename', '') vcenter_dc = '' if orch == 'vcenter': public_tenant_name='vCenter' if env.has_key('vcenter_servers'): if env.vcenter_servers: for k in env.vcenter_servers: vcenter_dc = env.vcenter_servers[k]['datacenter'] sanity_params = sanity_ini_templ.safe_substitute( {'__testbed_json_file__' : 'sanity_testbed.json', '__nova_keypair_name__' : keypair_name, '__orch__' : orch, '__admin_user__' : admin_user, '__admin_password__' : admin_password, '__admin_tenant__' : admin_tenant, '__tenant_isolation__' : tenant_isolation, '__stack_user__' : stack_user, '__stack_password__' : stack_password, '__auth_ip__' : auth_server_ip, '__auth_port__' : auth_server_port, '__auth_protocol__' : auth_protocol, '__stack_region_name__' : stack_region_name, '__stack_tenant__' : stack_tenant, '__stack_domain__' : stack_domain, '__multi_tenancy__' : get_mt_enable(), '__address_family__' : get_address_family(), '__log_scenario__' : log_scenario, '__generate_html_report__': generate_html_report, '__fixture_cleanup__' : fixture_cleanup, '__key_filename__' : key_filename, '__pubkey_filename__' : pubkey_filename, '__webserver__' : webserver_host, '__webserver_user__' : webserver_user, '__webserver_password__' : webserver_password, '__webserver_log_dir__' : webserver_log_path, '__webserver_report_dir__': webserver_report_path, '__webroot__' : webroot, '__mail_server__' : mail_server, '__mail_port__' : mail_port, '__sender_mail_id__' : mail_sender, '__receiver_mail_id__' : mail_to, '__http_proxy__' : env.get('http_proxy', ''), '__ui_browser__' : ui_browser, '__ui_config__' : ui_config, '__horizon__' : horizon, '__webui__' : webui, '__devstack__' : False, '__public_vn_rtgt__' : public_vn_rtgt, '__router_asn__' : router_asn, '__router_name_ip_tuples__': router_info, '__public_vn_name__' : fip_pool_name, '__public_virtual_network__':public_virtual_network, '__public_tenant_name__' :public_tenant_name, '__public_vn_subnet__' : public_vn_subnet, '__test_revision__' : revision, '__fab_revision__' : fab_revision, '__test_verify_on_setup__': test_verify_on_setup, '__stop_on_fail__' : stop_on_fail, '__ha_setup__' : getattr(testbed, 'ha_setup', ''), '__ipmi_username__' : getattr(testbed, 'ipmi_username', ''), '__ipmi_password__' : getattr(testbed, 'ipmi_password', ''), '__contrail_internal_vip__' : contrail_internal_vip, '__contrail_external_vip__' : contrail_external_vip, '__internal_vip__' : internal_vip, '__external_vip__' : external_vip, '__vcenter_dc__' : vcenter_dc, '__vcenter_server__' : get_vcenter_ip(), '__vcenter_port__' : get_vcenter_port(), '__vcenter_username__' : get_vcenter_username(), '__vcenter_password__' : get_vcenter_password(), '__vcenter_datacenter__' : get_vcenter_datacenter(), '__vcenter_compute__' : get_vcenter_compute(), '__use_devicemanager_for_md5__' : use_devicemanager_for_md5, '__discovery_port__' : discovery_port, '__config_api_port__' : config_api_port, '__analytics_api_port__' : analytics_api_port, '__control_port__' : control_port, '__dns_port__' : dns_port, 
'__vrouter_agent_port__' : agent_port, '__discovery_ip__' : discovery_ip, '__config_api_ip__' : config_api_ip, '__analytics_api_ip__' : analytics_api_ip, '__user_isolation__' : user_isolation, }) ini_file = test_dir + '/' + 'sanity_params.ini' testbed_json_file = test_dir + '/' + 'sanity_testbed.json' with open(ini_file, 'w') as ini: ini.write(sanity_params) with open(testbed_json_file,'w') as tb: tb.write(sanity_testbed_json) # Create /etc/contrail/openstackrc if not os.path.exists('/etc/contrail'): os.makedirs('/etc/contrail') with open('/etc/contrail/openstackrc','w') as rc: rc.write("export OS_USERNAME=%s\n" % admin_user) rc.write("export OS_PASSWORD=%s\n" % admin_password) rc.write("export OS_TENANT_NAME=%s\n" % admin_tenant) rc.write("export OS_REGION_NAME=%s\n" % stack_region_name) rc.write("export OS_AUTH_URL=%s://%s:%s/v2.0\n" % (auth_protocol, auth_server_ip, auth_server_port)) rc.write("export OS_NO_CACHE=1\n") # Write vnc_api_lib.ini - this is required for vnc_api to connect to keystone config = ConfigParser.ConfigParser() config.optionxform = str vnc_api_ini = '/etc/contrail/vnc_api_lib.ini' if os.path.exists(vnc_api_ini): config.read(vnc_api_ini) if 'auth' not in config.sections(): config.add_section('auth') config.set('auth','AUTHN_TYPE', 'keystone') config.set('auth','AUTHN_PROTOCOL', auth_protocol) config.set('auth','AUTHN_SERVER', auth_server_ip) config.set('auth','AUTHN_PORT', auth_server_port) config.set('auth','AUTHN_URL', '/v2.0/tokens') with open(vnc_api_ini,'w') as f: config.write(f) # If webui = True, in testbed, setup webui for sanity if webui: install_webui_packages(testbed) update_config_option('openstack', '/etc/keystone/keystone.conf', 'token', 'expiration', '86400','keystone') update_js_config('openstack', '/etc/contrail/config.global.js', 'contrail-webui')
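# Hypothetical helper (not in the original file) equivalent to the openstackrc block above:
# it writes the Keystone v2.0 credentials file consumed by the OpenStack clients. The path
# and values in the usage comment are examples, not the deployment's real ones.
import os

def write_openstackrc(path, user, password, tenant, region,
                      protocol, auth_ip, auth_port):
    rc_dir = os.path.dirname(path)
    if rc_dir and not os.path.exists(rc_dir):
        os.makedirs(rc_dir)
    with open(path, 'w') as rc:
        rc.write("export OS_USERNAME=%s\n" % user)
        rc.write("export OS_PASSWORD=%s\n" % password)
        rc.write("export OS_TENANT_NAME=%s\n" % tenant)
        rc.write("export OS_REGION_NAME=%s\n" % region)
        rc.write("export OS_AUTH_URL=%s://%s:%s/v2.0\n"
                 % (protocol, auth_ip, auth_port))
        rc.write("export OS_NO_CACHE=1\n")

# write_openstackrc('/tmp/openstackrc', 'admin', 'secret', 'admin',
#                   'RegionOne', 'http', '192.0.2.10', 5000)   # example values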
def zookeeper_rolling_restart(): zoo_cfg = "/etc/zookeeper/conf/zoo.cfg" cfgm_nodes = copy.deepcopy(env.roledefs['cfgm']) database_nodes = copy.deepcopy(env.roledefs['database']) zookeeper_status = verfiy_zookeeper(*database_nodes) if (len(database_nodes) % 2) != 1: print "Recommended to run odd number of zookeeper(database) nodes." print "Add a new node to the existing clusters testbed,py and install contrail-install-packages in it.\n\ Installing/Provisioning will be done as part of Upgrade" exit(0) if cfgm_nodes == database_nodes: print "No need for rolling restart." if (len(database_nodes) > 1 and 'leader' in zookeeper_status.values() and 'follower' in zookeeper_status.values() and 'notrunning' not in zookeeper_status.values() and 'notinstalled' not in zookeeper_status.values() and 'standalone' not in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is already formed properly." return elif (len(database_nodes) == 1 and 'notinstalled' not in zookeeper_status.values() and 'standalone' in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is already formed properly." return execute('stop_cfgm') execute('backup_zookeeper_database') old_nodes = list(set(cfgm_nodes).difference(set(database_nodes))) new_nodes = list(set(database_nodes).difference(set(cfgm_nodes))) for new_node in new_nodes: zk_index = (database_nodes.index(new_node) + len(cfgm_nodes) + 1) with settings(host_string=new_node, password=get_env_passwords(new_node)): pdist = detect_ostype() print "Install zookeeper in the new node." execute('create_install_repo_node', new_node) remove_package(['supervisor'], pdist) upgrade_package([ 'python-contrail', 'contrail-openstack-database', 'zookeeper' ], pdist) if pdist in ['ubuntu']: sudo("ln -sf /bin/true /sbin/chkconfig") sudo("chkconfig zookeeper on") print "Fix zookeeper configs" sudo( "sudo sed 's/^#log4j.appender.ROLLINGFILE.MaxBackupIndex=/log4j.appender.ROLLINGFILE.MaxBackupIndex=/g' /etc/zookeeper/conf/log4j.properties > log4j.properties.new" ) sudo( "sudo mv log4j.properties.new /etc/zookeeper/conf/log4j.properties" ) if pdist in ['centos']: sudo( 'echo export ZOO_LOG4J_PROP="INFO,CONSOLE,ROLLINGFILE" >> /usr/lib/zookeeper/bin/zkEnv.sh' ) if pdist in ['ubuntu']: sudo( 'echo ZOO_LOG4J_PROP="INFO,CONSOLE,ROLLINGFILE" >> /etc/zookeeper/conf/environment' ) print "put cluster-unique zookeeper's instance id in myid" sudo('sudo echo "%s" > /var/lib/zookeeper/myid' % (zk_index)) print "Add new nodes to existing zookeeper quorum" with settings(host_string=cfgm_nodes[0], password=get_env_passwords(cfgm_nodes[0])): for new_node in new_nodes: zk_index = (database_nodes.index(new_node) + len(cfgm_nodes) + 1) sudo('echo "server.%d=%s:2888:3888" >> %s' % (zk_index, hstr_to_ip(new_node), zoo_cfg)) tmp_dir = tempfile.mkdtemp() get_as_sudo(zoo_cfg, tmp_dir) print "Restart zookeeper in all nodes to make new nodes join zookeeper quorum" for zookeeper_node in cfgm_nodes + new_nodes: with settings(host_string=zookeeper_node, password=get_env_passwords(zookeeper_node)): put(tmp_dir + '/zoo.cfg', zoo_cfg, use_sudo=True) print "Start Zookeeper in new database node" execute('restart_zookeeper') print "Waiting 5 seconds for the new nodes in the zookeeper quorum to be synced." 
sleep(5) print "Shutdown old nodes one by one and also make sure leader/follower election is complete after each shut downs" zoo_nodes = cfgm_nodes + database_nodes for old_node in old_nodes: zoo_nodes.remove(old_node) with settings(host_string=old_node, password=get_env_passwords(old_node)): print "Stop Zookeeper in old cfgm node" execute('stop_zookeeper') for zoo_node in zoo_nodes: with settings(host_string=zoo_node, password=get_env_passwords(zoo_node)): sudo("sed -i '/^server.*%s:2888:3888/d' %s" % (hstr_to_ip(zoo_node), zoo_cfg)) retries = 3 while retries: zookeeper_status = verfiy_zookeeper(*zoo_nodes) if (len(zoo_nodes) > 1 and 'leader' in zookeeper_status.values() and 'follower' in zookeeper_status.values() and 'notrunning' not in zookeeper_status.values() and 'notinstalled' not in zookeeper_status.values() and 'standalone' not in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is formed properly." break elif (len(zoo_nodes) == 1 and 'notinstalled' not in zookeeper_status.values() and 'standalone' in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is formed properly." break else: retries -= 1 if retries: for zoo_node in zoo_nodes: with settings( host_string=zoo_node, password=get_env_passwords(zoo_node)): execute('restart_zookeeper') continue print "Zookeeper quorum is not formed. Fix it and retry upgrade" print zookeeper_status exit(1) print "Correct the server id in zoo.cfg for the new nodes in the zookeeper quorum" with settings(host_string=database_nodes[0], password=get_env_passwords(database_nodes[0])): sudo("sed -i '/^server.*3888/d' %s" % zoo_cfg) for zookeeper_node in database_nodes: zk_index = (database_nodes.index(zookeeper_node) + 1) sudo('echo "server.%d=%s:2888:3888" >> %s' % (zk_index, hstr_to_ip(zookeeper_node), zoo_cfg)) tmp_dir = tempfile.mkdtemp() get_as_sudo(zoo_cfg, tmp_dir) print "Correct the myid in myid file for the new nodes in the zookeeper quorum" for zookeeper_node in database_nodes: zk_index = (database_nodes.index(zookeeper_node) + 1) with settings(host_string=zookeeper_node, password=get_env_passwords(zookeeper_node)): print "put cluster-unique zookeeper's instance id in myid" sudo('sudo echo "%s" > /var/lib/zookeeper/myid' % (zk_index)) execute('stop_zookeeper') print "Restart all the zookeeper nodes in the new quorum" for zookeeper_node in database_nodes: with settings(host_string=zookeeper_node, password=get_env_passwords(zookeeper_node)): put(tmp_dir + '/zoo.cfg', zoo_cfg, use_sudo=True) execute('restart_zookeeper') print "Make sure leader/folower election is complete" with settings(host_string=zookeeper_node, password=get_env_passwords(zookeeper_node)): retries = 3 while retries: zookeeper_status = verfiy_zookeeper(*database_nodes) if (len(database_nodes) > 1 and 'leader' in zookeeper_status.values() and 'follower' in zookeeper_status.values() and 'notrunning' not in zookeeper_status.values() and 'notinstalled' not in zookeeper_status.values() and 'standalone' not in zookeeper_status.values()): print zookeeper_status break elif (len(database_nodes) == 1 and 'notinstalled' not in zookeeper_status.values() and 'standalone' in zookeeper_status.values()): print zookeeper_status print "Zookeeper quorum is already formed properly." break else: retries -= 1 if retries: continue print "Zookeepr leader/follower election has problems. Fix it and retry upgrade" print zookeeper_status exit(1)