Esempio n. 1
0
def setup_galera_cluster():
    """Task to cluster the openstack nodes with galera"""
    if len(env.roledefs["openstack"]) <= 1:
        print "Single Openstack cluster, skipping galera cluster setup."
        return

    if env.roledefs["openstack"].index(env.host_string) == 0:
        execute("setup_passwordless_ssh", *env.roledefs["openstack"])
    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)

    openstack_host_list = [get_control_host_string(openstack_host) for openstack_host in env.roledefs["openstack"]]
    galera_ip_list = [hstr_to_ip(galera_host) for galera_host in openstack_host_list]
    authserver_ip = get_authserver_ip()
    internal_vip = get_openstack_internal_vip()
    external_vip = get_openstack_external_vip()

    zoo_ip_list = [hstr_to_ip(get_control_host_string(cassandra_host)) for cassandra_host in env.roledefs["database"]]

    monitor_galera = "False"
    if get_openstack_internal_vip():
        monitor_galera = "True"

    cmon_db_user = "******"
    cmon_db_pass = "******"
    keystone_db_user = "******"
    keystone_db_pass = "******"
    with cd(INSTALLER_DIR):
        cmd = (
            "setup-vnc-galera\
            --self_ip %s --keystone_ip %s --galera_ip_list %s\
            --internal_vip %s --openstack_index %d --zoo_ip_list %s --keystone_user %s\
            --keystone_pass %s --cmon_user %s --cmon_pass %s --monitor_galera %s"
            % (
                self_ip,
                keystone_ip,
                " ".join(galera_ip_list),
                internal_vip,
                (openstack_host_list.index(self_host) + 1),
                " ".join(zoo_ip_list),
                keystone_db_user,
                keystone_db_pass,
                cmon_db_user,
                cmon_db_pass,
                monitor_galera,
            )
        )

        if external_vip:
            cmd += " --external_vip %s" % external_vip
        sudo(cmd)
Esempio n. 2
0
def uninstall_openstack_node(*args):
    """Uninstalls openstack pkgs in one or list of nodes. USAGE:fab uninstall_openstack_node:[email protected],[email protected]"""
    for host_string in args:
        with settings(host_string=host_string):
            packages = ['contrail-openstack']
            if len(env.roledefs['openstack']) > 1 and get_openstack_internal_vip():
                packages.append('contrail-openstack-ha')
            if detect_ostype() == 'ubuntu':
                # Detach the (possibly shared) glance image store before
                # removing packages, and drop its fstab entry.
                with settings(warn_only=True):
                    sudo("umount /var/lib/glance/images")
                sudo("sed -i '/.*glance.*/d' /etc/fstab")
                apt_uninstall(packages)
            else:
                yum_uninstall(get_pkg_list())
            # Purge residual configuration, state and logs of the
            # openstack services.
            cleanup = (
                ('/etc/', 'sudo rm -rf glance/ cinder/ openstack_dashboard/ keystone/ quantum/ nova/ haproxy/ keepalived/'),
                ('/var/lib/', 'sudo rm -rf nova quantum glance quantum keystone mysql haproxy'),
                ('/var/run', 'sudo rm -rf cinder glance quantum nova keystone'),
                ('/var/log', 'sudo rm -rf contrail/* nova quantum glance cinder ~/keystone-signing /tmp/keystone-signing /tmp/keystone-signing-nova'),
            )
            for directory, command in cleanup:
                with cd(directory):
                    sudo(command)
Esempio n. 3
0
def remove_node_from_galera(del_galera_node):
    """Task to remove a node from the galera cluster. Removes config from other Galera nodes """

    # A galera quorum needs at least three members; refuse to shrink below.
    if len(env.roledefs['openstack']) < 3:
        raise RuntimeError("Galera cluster needs of quorum of at least 3 nodes! Cannot remove the node from cluster")

    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)

    openstack_host_list = [get_control_host_string(host)
                           for host in env.roledefs['openstack']]
    galera_ip_list = [hstr_to_ip(host) for host in openstack_host_list]
    authserver_ip = get_authserver_ip()
    internal_vip = get_openstack_internal_vip()
    external_vip = get_openstack_external_vip()
    zoo_ip_list = [hstr_to_ip(get_control_host_string(db_host))
                   for db_host in env.roledefs['database']]

    # The installer expects a 1-based node index.
    self_index = openstack_host_list.index(self_host) + 1
    with cd(INSTALLER_DIR):
        cmd = "remove-galera-node\
            --self_ip %s --node_to_del %s --keystone_ip %s --galera_ip_list %s\
            --internal_vip %s --openstack_index %d --zoo_ip_list %s" % (
            self_ip, del_galera_node, authserver_ip,
            ' '.join(galera_ip_list), internal_vip, self_index,
            ' '.join(zoo_ip_list))

        if external_vip:
            cmd += ' --external_vip %s' % external_vip
        sudo(cmd)
Esempio n. 4
0
def setup_galera_cluster():
    """Task to cluster the openstack nodes with galera"""
    if len(env.roledefs['openstack']) <= 1:
        print "Single Openstack cluster, skipping galera cluster setup."
        return

    if env.roledefs['openstack'].index(env.host_string) == 0:
        execute('setup_passwordless_ssh', *env.roledefs['openstack'])
    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)
    openstack_host_password = env.passwords[env.host_string]

    if (getattr(env, 'openstack_admin_password', None)):
        openstack_admin_password = env.openstack_admin_password
    else:
        openstack_admin_password = '******'

    openstack_host_list = [get_control_host_string(openstack_host)\
                           for openstack_host in env.roledefs['openstack']]
    galera_ip_list = [hstr_to_ip(galera_host)\
                      for galera_host in openstack_host_list]
    keystone_ip = get_keystone_ip()
    internal_vip = get_openstack_internal_vip()

    with cd(INSTALLER_DIR):
        run("PASSWORD=%s ADMIN_TOKEN=%s python setup-vnc-galera.py\
            --self_ip %s --keystone_ip %s --galera_ip_list %s\
            --internal_vip %s --openstack_index %d" % (openstack_host_password,
                openstack_admin_password, self_ip, keystone_ip,
                ' '.join(galera_ip_list), internal_vip,
                (openstack_host_list.index(self_host) + 1)))
Esempio n. 5
0
def setup_keepalived_node(role):
    """Task to provision VIP for node with keepalived"""
    mgmt_ip = hstr_to_ip(env.host_string)
    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)

    internal_vip = get_openstack_internal_vip()
    external_vip = get_openstack_external_vip()
    if role == 'cfgm':
        internal_vip = get_contrail_internal_vip()
        external_vip = get_contrail_external_vip()
    keepalived_host_list = [get_control_host_string(keepalived_host)\
                           for keepalived_host in env.roledefs[role]]
    myindex = keepalived_host_list.index(self_host)
    if myindex >= 1:
        # Wait for VIP to be assiciated to MASTER
        with settings(host_string=env.roledefs[role][0], warn_only=True):
            while sudo("ip addr | grep %s" % internal_vip).failed:
                sleep(2)
                print "Waiting for VIP to be associated to MASTER VRRP."
                continue
 
    with cd(INSTALLER_DIR):
        cmd = "setup-vnc-keepalived\
               --self_ip %s --internal_vip %s --mgmt_self_ip %s\
               --self_index %d --num_nodes %d --role %s" % ( self_ip,
                internal_vip, mgmt_ip, (keepalived_host_list.index(self_host) + 1),
                len(env.roledefs[role]), role)
        if external_vip:
             cmd += ' --external_vip %s' % external_vip
        sudo(cmd)
def uninstall_openstack_node(*args):
    """Uninstalls openstack pkgs in one or list of nodes. USAGE:fab uninstall_openstack_node:[email protected],[email protected]"""
    for host_string in args:
        with settings(host_string=host_string):
            to_remove = ['contrail-openstack']
            # The HA helper package only exists on multi-node VIP setups.
            if len(env.roledefs['openstack']) > 1 and get_openstack_internal_vip():
                to_remove.append('contrail-openstack-ha')
            # Ceilometer packages are only present on supported releases.
            if is_ceilometer_install_supported():
                to_remove += get_openstack_ceilometer_pkgs()
                to_remove += get_ceilometer_plugin_pkgs()
            if detect_ostype() == 'ubuntu':
                # Detach the (possibly shared) glance image store first.
                with settings(warn_only=True):
                    sudo("umount /var/lib/glance/images")
                sudo("sed -i '/.*glance.*/d' /etc/fstab")
                apt_uninstall(to_remove)
            else:
                yum_uninstall(get_pkg_list())
            # Wipe residual configuration, state and logs.
            with cd('/etc/'):
                sudo('sudo rm -rf glance/ cinder/ openstack_dashboard/ keystone/ quantum/ nova/ haproxy/ keepalived/')
            with cd('/var/lib/'):
                sudo('sudo rm -rf nova quantum glance quantum keystone mysql haproxy')
            with cd('/var/run'):
                sudo('sudo rm -rf cinder glance quantum nova keystone')
            with cd('/var/log'):
                sudo('sudo rm -rf contrail/* nova quantum glance cinder ~/keystone-signing /tmp/keystone-signing /tmp/keystone-signing-nova')
Esempio n. 7
0
def setup_keystone_ssl_certs_node(*nodes):
    default_certfile = '/etc/keystone/ssl/certs/keystone.pem'
    default_keyfile = '/etc/keystone/ssl/private/keystone.key'
    default_cafile = '/etc/keystone/ssl/certs/keystone_ca.pem'
    keystonecertbundle = get_keystone_cert_bundle()
    ssl_certs = ((get_keystone_certfile(), default_certfile),
                 (get_keystone_keyfile(), default_keyfile),
                 (get_keystone_cafile(), default_cafile))
    index = env.roledefs['openstack'].index(env.host_string) + 1
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    # Clear old certificate
                    sudo('rm -f %s' % ssl_cert)
                    sudo('rm -f %s' % keystonecertbundle)
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    openstack_host = env.roledefs['openstack'][0]
                    if index == 1:
                        if not exists(ssl_cert, use_sudo=True):
                            print "Creating keystone SSL certs in first openstack node"
                            subject_alt_names_mgmt = [hstr_to_ip(host)
                                                      for host in env.roledefs['openstack']]
                            subject_alt_names_ctrl = [hstr_to_ip(get_control_host_string(host))
                                                      for host in env.roledefs['openstack']]
                            subject_alt_names = subject_alt_names_mgmt + subject_alt_names_ctrl
                            if get_openstack_external_vip():
                                subject_alt_names.append(get_openstack_external_vip())
                            sudo('create-keystone-ssl-certs.sh %s %s' % (
                                    get_openstack_internal_vip() or
                                    hstr_to_ip(get_control_host_string(openstack_host)),
                                    ','.join(subject_alt_names)))
                    else:
                        with settings(host_string=openstack_host,
                                      password=get_env_passwords(openstack_host)):
                            while not exists(ssl_cert, use_sudo=True):
                                print "Wait for SSL certs to be created in first openstack"
                                sleep(0.1)
                            print "Get SSL cert(%s) from first openstack" % ssl_cert
                            tmp_dir= tempfile.mkdtemp()
                            tmp_fname = os.path.join(tmp_dir, os.path.basename(ssl_cert))
                            get_as_sudo(ssl_cert, tmp_fname)
                        print "Copy to this(%s) openstack node" % env.host_string 
                        sudo('mkdir -p /etc/keystone/ssl/certs/')
                        sudo('mkdir -p /etc/keystone/ssl/private/')
                        put(tmp_fname, ssl_cert, use_sudo=True)
                        os.remove(tmp_fname)
                elif os.path.isfile(ssl_cert): 
                    print "Certificate (%s) exists locally" % ssl_cert
                    put(ssl_cert, default, use_sudo=True)
                elif exists(ssl_cert, use_sudo=True): 
                    print "Certificate (%s) exists in openstack node" % ssl_cert
                    pass
                else:
                    raise RuntimeError("%s doesn't exists locally or in openstack node")
            if not exists(keystonecertbundle, use_sudo=True):
                ((certfile, _), (keyfile, _), (cafile, _)) = ssl_certs
                sudo('cat %s %s > %s' % (certfile, cafile, keystonecertbundle))
            sudo("chown -R keystone:keystone /etc/keystone/ssl")
Esempio n. 8
0
def setup_keepalived_node(role):
    """Task to provision VIP for node with keepalived"""
    mgmt_ip = hstr_to_ip(env.host_string)
    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)

    internal_vip = get_openstack_internal_vip()
    external_vip = get_openstack_external_vip()
    if role == 'cfgm':
        internal_vip = get_contrail_internal_vip()
        external_vip = get_contrail_external_vip()
    keepalived_host_list = [get_control_host_string(keepalived_host)\
                           for keepalived_host in env.roledefs[role]]
    myindex = keepalived_host_list.index(self_host)
    if myindex >= 1:
        # Wait for VIP to be assiciated to MASTER
        with settings(host_string=env.roledefs[role][0], warn_only=True):
            while sudo("ip addr | grep %s" % internal_vip).failed:
                sleep(2)
                print "Waiting for VIP to be associated to MASTER VRRP."
                continue

    with cd(INSTALLER_DIR):
        cmd = "setup-vnc-keepalived\
               --self_ip %s --internal_vip %s --mgmt_self_ip %s\
               --self_index %d --num_nodes %d --role %s" % (
            self_ip, internal_vip, mgmt_ip,
            (keepalived_host_list.index(self_host) + 1), len(
                env.roledefs[role]), role)
        if external_vip:
            cmd += ' --external_vip %s' % external_vip
        sudo(cmd)
Esempio n. 9
0
def setup_rabbitmq_cluster(force=False):
    """Task to cluster the rabbit servers."""
    amqp_roles = []
    rabbit_servers = get_from_testbed_dict('cfgm', 'amqp_hosts', None)
    if rabbit_servers:
        print "Using external rabbitmq servers %s" % rabbit_servers
    else:
        # Provision rabbitmq cluster in cfgm role nodes.
        print "Provisioning rabbitq in cfgm nodes"
        amqp_roles = ['cfgm']

    # Provision rabbitmq cluster in openstack on request
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        # Provision rabbitmq cluster in openstack role nodes aswell.
        amqp_roles.append('openstack')

    for role in amqp_roles:
        env.roledefs['rabbit'] = env.roledefs[role]

        if not force:
            with settings(warn_only=True):
                result = execute("verify_cluster_status", retry='no')
            if result and False not in result.values():
                print "RabbitMQ cluster is up and running in role[%s]; No need to cluster again." % role
                continue

        rabbitmq_cluster_uuid = getattr(testbed, 'rabbitmq_cluster_uuid', None)
        if not rabbitmq_cluster_uuid:
            rabbitmq_cluster_uuid = uuid.uuid4()

        if not is_xenial_or_above():
            execute(listen_at_supervisor_support_port)
        execute(remove_mnesia_database)
        execute(verify_rabbit_node_hostname)
        execute(allow_rabbitmq_port)
        execute(rabbitmq_env)
        execute(config_rabbitmq)
        execute("stop_rabbitmq_and_set_cookie", rabbitmq_cluster_uuid)
        execute(start_rabbitmq)
        # adding sleep to workaround rabbitmq bug 26370 prevent
        # "rabbitmqctl cluster_status" from breaking the database,
        # this is seen in ci
        time.sleep(60)
        #execute(rabbitmqctl_stop_app)
        #execute(rabbitmqctl_reset)
        #execute("rabbitmqctl_start_app_node", env.roledefs['rabbit'][0])
        #execute(add_node_to_rabbitmq_cluster)
        #execute(rabbitmqctl_start_app)
        if (role is 'openstack' and get_openstack_internal_vip() or
            role is 'cfgm' and get_contrail_internal_vip()):
            execute('set_ha_policy_in_rabbitmq')
            execute('set_tcp_keepalive')
            execute('set_tcp_keepalive_on_compute')
        result = execute(verify_cluster_status)
        if False in result.values():
            print "Unable to setup RabbitMQ cluster in role[%s]...." % role
            exit(1)
Esempio n. 10
0
def setup_rabbitmq_cluster(force=False):
    """Task to cluster the rabbit servers."""
    amqp_roles = []
    rabbit_servers = get_from_testbed_dict('cfgm', 'amqp_hosts', None)
    if rabbit_servers:
        print "Using external rabbitmq servers %s" % rabbit_servers
    else:
        # Provision rabbitmq cluster in cfgm role nodes.
        print "Provisioning rabbitq in cfgm nodes"
        amqp_roles = ['cfgm']

    # Provision rabbitmq cluster in openstack on request
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        # Provision rabbitmq cluster in openstack role nodes aswell.
        amqp_roles.append('openstack')

    for role in amqp_roles:
        env.roledefs['rabbit'] = env.roledefs[role]

        if not force:
            with settings(warn_only=True):
                result = execute("verify_cluster_status", retry='no')
            if result and False not in result.values():
                print "RabbitMQ cluster is up and running in role[%s]; No need to cluster again." % role
                continue

        rabbitmq_cluster_uuid = getattr(testbed, 'rabbitmq_cluster_uuid', None)
        if not rabbitmq_cluster_uuid:
            rabbitmq_cluster_uuid = uuid.uuid4()

        execute(listen_at_supervisor_support_port)
        execute(remove_mnesia_database)
        execute(verify_rabbit_node_hostname)
        execute(allow_rabbitmq_port)
        execute(rabbitmq_env)
        execute(config_rabbitmq)
        execute("stop_rabbitmq_and_set_cookie", rabbitmq_cluster_uuid)
        execute(start_rabbitmq)
        # adding sleep to workaround rabbitmq bug 26370 prevent
        # "rabbitmqctl cluster_status" from breaking the database,
        # this is seen in ci
        time.sleep(60)
        #execute(rabbitmqctl_stop_app)
        #execute(rabbitmqctl_reset)
        #execute("rabbitmqctl_start_app_node", env.roledefs['rabbit'][0])
        #execute(add_node_to_rabbitmq_cluster)
        #execute(rabbitmqctl_start_app)
        if (role is 'openstack' and get_openstack_internal_vip() or
            role is 'cfgm' and get_contrail_internal_vip()):
            execute('set_ha_policy_in_rabbitmq')
            execute('set_tcp_keepalive')
            execute('set_tcp_keepalive_on_compute')
        result = execute(verify_cluster_status)
        if False in result.values():
            print "Unable to setup RabbitMQ cluster in role[%s]...." % role
            exit(1)
Esempio n. 11
0
def install_openstack_node(*args):
    """Installs openstack pkgs in one or list of nodes. USAGE:fab install_openstack_node:[email protected],[email protected]"""
    for host_string in args:
        with settings(host_string=host_string):
            packages = ['contrail-openstack']
            # HA helper package only applies to multi-node VIP setups.
            if len(env.roledefs['openstack']) > 1 and get_openstack_internal_vip():
                packages.append('contrail-openstack-ha')
            # NOTE(review): 'Ubuntu' is capitalized here while sibling
            # variants compare against lowercase 'ubuntu' — confirm what
            # detect_ostype() actually returns.
            if detect_ostype() == 'Ubuntu':
                apt_install(packages)
            else:
                yum_install(packages)
Esempio n. 12
0
def install_openstack_node(*args):
    """Installs openstack pkgs in one or list of nodes. USAGE:fab install_openstack_node:[email protected],[email protected]"""
    for host_string in args:
        with settings(host_string=host_string):
            # The HA helper package is only needed on multi-node VIP setups.
            ha_deployment = (len(env.roledefs['openstack']) > 1
                             and get_openstack_internal_vip())
            pkgs = ['contrail-openstack']
            if ha_deployment:
                pkgs.append('contrail-openstack-ha')
            # Pick the package manager matching the detected distro.
            installer = apt_install if detect_ostype() == 'ubuntu' else yum_install
            installer(pkgs)
Esempio n. 13
0
def join_orchestrator(new_ctrl_host):
    """Bring a newly added controller node into the orchestrator (openstack only)."""
    if get_orchestrator() != 'openstack':
        return
    execute('increase_ulimits_node', new_ctrl_host)
    execute('setup_openstack_node', new_ctrl_host)
    if is_package_installed('contrail-openstack-dashboard'):
        execute('setup_contrail_horizon_node', new_ctrl_host)
    if get_openstack_internal_vip():
        # HA deployment: share keystone certs and monitoring with the
        # new node before restarting its services.
        execute('sync_keystone_ssl_certs_node', new_ctrl_host)
        execute('setup_cluster_monitors_node', new_ctrl_host)
    with settings(host_string=new_ctrl_host):
        sudo('service supervisor-openstack restart')
    execute('verify_openstack')
Esempio n. 14
0
def purge_node_from_keepalived_cluster(del_ctrl_ip, role_to_purge):
    if not ping_test(del_ctrl_ip):
        print "Keepalived config is not deactivated in %s since it is not reachable" % del_ctrl_ip
        return

    with settings(host_string=del_ctrl_ip, warn_only=True):
        # If the node is no more part of an Openstack or Contrail
        # Cluster, then stop keepalived from running and empty
        # out the config.
        if role_to_purge == 'all':
            sudo('service keepalived stop')
            sudo('mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.removed')
            sudo("chkconfig keepalived off")
        elif role_to_purge == 'openstack' and\
             get_contrail_internal_vip() != get_openstack_internal_vip():
            # Case where there are two VIPs for OS and Contrail and one of them
            # should be active.
            setup_keepalived_node('cfgm')
        elif role_to_purge == 'cfgm' and\
             get_contrail_internal_vip() != get_openstack_internal_vip():
            setup_keepalived_node('openstack')
        else:
            raise RuntimeError("Invalid options for removing keepalived node from a cluster")
Esempio n. 15
0
def setup_cmon_schema():
    """Task to configure cmon schema in the openstack nodes to monitor galera cluster"""
    if len(env.roledefs['openstack']) <= 1:
        print "Single Openstack cluster, skipping cmon schema  setup."
        return

    openstack_host_list = [get_control_host_string(openstack_host)\
                           for openstack_host in env.roledefs['openstack']]
    galera_ip_list = [hstr_to_ip(galera_host)\
                      for galera_host in openstack_host_list]
    internal_vip = get_openstack_internal_vip()

    mysql_token = sudo("cat /etc/contrail/mysql.token")
    pdist = detect_ostype()
    if pdist in ['ubuntu']:
        mysql_svc = 'mysql'
    elif pdist in ['centos', 'redhat']:
        mysql_svc = 'mysqld'
    # Create cmon schema
    sudo('mysql -u root -p%s -e "CREATE SCHEMA IF NOT EXISTS cmon"' %
         mysql_token)
    sudo('mysql -u root -p%s < /usr/local/cmon/share/cmon/cmon_db.sql' %
         mysql_token)
    sudo('mysql -u root -p%s < /usr/local/cmon/share/cmon/cmon_data.sql' %
         mysql_token)

    # insert static data
    sudo(
        'mysql -u root -p%s -e "use cmon; insert into cluster(type) VALUES (\'galera\')"'
        % mysql_token)

    host_list = galera_ip_list + ['localhost', '127.0.0.1', internal_vip]
    # Create cmon user
    for host in host_list:
        mysql_cmon_user_cmd = 'mysql -u root -p%s -e "CREATE USER \'cmon\'@\'%s\' IDENTIFIED BY \'cmon\'"' % (
            mysql_token, host)
        with settings(hide('everything'), warn_only=True):
            sudo(mysql_cmon_user_cmd)

    mysql_cmd = "mysql -uroot -p%s -e" % mysql_token
    # Grant privilages for cmon user.
    for host in host_list:
        sudo(
            '%s "GRANT ALL PRIVILEGES on *.* TO cmon@%s IDENTIFIED BY \'cmon\' WITH GRANT OPTION"'
            % (mysql_cmd, host))
    # Restarting mysql in all openstack nodes
    for host_string in env.roledefs['openstack']:
        with settings(host_string=host_string):
            sudo("service %s restart" % mysql_svc)
def setup_rabbitmq_cluster(force=False):
    """Task to cluster the rabbit servers."""
    # Provision rabbitmq cluster in cfgm role nodes.
    amqp_roles = ['cfgm']
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        # Provision rabbitmq cluster in openstack role nodes aswell.
        amqp_roles.append('openstack')
    for role in amqp_roles:
        env.roledefs['rabbit'] = env.roledefs[role]

        if not force:
            with settings(warn_only=True):
                result = execute(verify_cluster_status)
            if result and False not in result.values():
                print "RabbitMQ cluster is up and running in role[%s]; No need to cluster again." % role
                continue

        rabbitmq_cluster_uuid = getattr(testbed, 'rabbitmq_cluster_uuid', None)
        if not rabbitmq_cluster_uuid:
            rabbitmq_cluster_uuid = uuid.uuid4()

        execute(listen_at_supervisor_config_port)
        execute(remove_mnesia_database)
        execute(verify_rabbit_node_hostname)
        execute(allow_rabbitmq_port)
        execute(config_rabbitmq)
        execute("stop_rabbitmq_and_set_cookie", rabbitmq_cluster_uuid)
        execute(start_rabbitmq)
        if len(env.roledefs['rabbit']) <= 1:
            print "Single cfgm cluster, Starting rabbitmq."
            return
        #execute(rabbitmqctl_stop_app)
        #execute(rabbitmqctl_reset)
        #execute("rabbitmqctl_start_app_node", env.roledefs['rabbit'][0])
        #execute(add_node_to_rabbitmq_cluster)
        #execute(rabbitmqctl_start_app)
        if (role is 'openstack' and get_openstack_internal_vip() or
            role is 'cfgm' and get_contrail_internal_vip()):
            execute('set_ha_policy_in_rabbitmq')
            execute('set_tcp_keepalive')
            execute('set_tcp_keepalive_on_compute')
        result = execute(verify_cluster_status)
        if False in result.values():
            print "Unable to setup RabbitMQ cluster in role[%s]...." % role
            exit(1)
Esempio n. 17
0
def join_rabbitmq_cluster(new_ctrl_host):
    """ Task to join a new rabbit server into an existing cluster """
    # Provision rabbitmq cluster in cfgm role nodes.
    amqp_roles = ['cfgm']
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        #Provision rabbitmq cluster in openstack role nodes aswell.
        amqp_roles.append('openstack')
    for role in amqp_roles:
        env.roledefs['rabbit'] = env.roledefs[role]

        # copy the erlang cookie from one of the other nodes.
        rabbitmq_cluster_uuid = None
        for host_string in env.roledefs['rabbit']:
            with settings(host_string=host_string, warn_only=True):
                if host_string != new_ctrl_host and\
                   sudo('ls /var/lib/rabbitmq/.erlang.cookie').succeeded:
                    rabbitmq_cluster_uuid = \
                        sudo('cat /var/lib/rabbitmq/.erlang.cookie')
                    break;
        if rabbitmq_cluster_uuid is None:
            raise RuntimeError("Not able to get the Erlang cookie from the cluster nodes")

        if not is_xenial_or_above():
            execute(listen_at_supervisor_support_port_node, new_ctrl_host)
        execute(remove_mnesia_database_node, new_ctrl_host)
        execute(verify_rabbit_node_hostname)
        execute(allow_rabbitmq_port_node, new_ctrl_host)
        execute(rabbitmq_env)
        execute(config_rabbitmq)
        execute('stop_rabbitmq_and_set_cookie_node', rabbitmq_cluster_uuid, new_ctrl_host)
        execute('start_rabbitmq_node', new_ctrl_host)
        # adding sleep to workaround rabbitmq bug 26370 prevent
        # "rabbitmqctl cluster_status" from breaking the database,
        # this is seen in ci
        time.sleep(30)
        if (role is 'openstack' and get_openstack_internal_vip() or
            role is 'cfgm' and get_contrail_internal_vip()):
            execute('set_ha_policy_in_rabbitmq')
            execute('set_tcp_keepalive')

        result = execute(verify_cluster_status)
        if False in result.values():
            print "Unable to setup RabbitMQ cluster in role[%s]...." % role
            exit(1)
Esempio n. 18
0
def purge_node_from_rabbitmq_cluster(del_rabbitmq_node, role):
    """Remove a rabbitmq node from the cluster of the given role.

    If the node is reachable it is reset in place; otherwise it is
    forgotten remotely from another cluster member.  The remaining
    nodes are then reconfigured and restarted one by one.
    """

    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'no' and\
                             role == 'openstack':
        # We are not managing the RabbitMQ server. No-op.
        return

    if get_contrail_internal_vip() != get_openstack_internal_vip() and\
       role == 'cfgm':
        # Openstack and Contrail are in two different nodes. Cfgm
        # rabbitmq will point to the Openstack node. No-op.
        return

    env.roledefs['rabbit'] = env.roledefs[role]
    # NOTE(review): del_rabbitmq_ip appears unused below — confirm.
    del_rabbitmq_ip = hstr_to_ip(del_rabbitmq_node)
    del_rabbitmq_ctrl_ip = hstr_to_ip(get_control_host_string(del_rabbitmq_node))
    if ping_test(del_rabbitmq_node):
        # Node reachable: stop and reset rabbitmq locally, and park its
        # erlang cookie and config so it cannot rejoin by accident.
        with settings(host_string = del_rabbitmq_node, warn_only = True):
            sudo("rabbitmqctl stop_app")
            sudo("rabbitmqctl reset")
            sudo("service supervisor-support-service stop")
            sudo("mv /var/lib/rabbitmq/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie.removed")
            sudo("mv /etc/rabbitmq/rabbitmq.config /etc/rabbitmq/rabbitmq.config.removed")
    else:
        # If the node is not reachable, then delete the node remotely from one
        # of the nodes in the cluster.
        with settings(host_string = env.roledefs['rabbit'][0], warn_only = True):
            # Resolve the node's rabbit hostname from its control IP.
            # NOTE(review): the awk quoting below works (the shell
            # re-assembles '{print $3}') but is fragile — handle with care.
            hostname = local('getent hosts %s | awk \'{print $3\'}' % del_rabbitmq_ctrl_ip, capture = True)
            sudo("rabbitmqctl forget_cluster_node rabbit@%s" % hostname)

    # Giving some time for the other nodes to re-adjust the cluster, 
    time.sleep(30)

    # Regenerate the rabbitmq config on the remaining nodes and restart
    # them one at a time, pausing so each can rejoin the cluster.
    execute(config_rabbitmq)
    for host_string in env.roledefs[role]:
        with settings(host_string = host_string):
            sudo("service rabbitmq-server restart")
            # Give time for RabbitMQ to recluster
            time.sleep(30)

    result = execute(verify_cluster_status)
    if False in result.values():
        print "Unable to recluster RabbitMQ cluster after removing the node %s" % del_rabbitmq_node
        exit(1)
def setup_rabbitmq_cluster(force=False):
    """Task to cluster the rabbit servers."""
    # Provision rabbitmq cluster in cfgm role nodes.
    amqp_roles = ["cfgm"]
    if get_from_testbed_dict("openstack", "manage_amqp", "no") == "yes":
        # Provision rabbitmq cluster in openstack role nodes aswell.
        amqp_roles.append("openstack")
    for role in amqp_roles:
        env.roledefs["rabbit"] = env.roledefs[role]

        if not force:
            with settings(warn_only=True):
                result = execute("verify_cluster_status", retry="no")
            if result and False not in result.values():
                print "RabbitMQ cluster is up and running in role[%s]; No need to cluster again." % role
                continue

        rabbitmq_cluster_uuid = getattr(testbed, "rabbitmq_cluster_uuid", None)
        if not rabbitmq_cluster_uuid:
            rabbitmq_cluster_uuid = uuid.uuid4()

        execute(listen_at_supervisor_support_port)
        execute(remove_mnesia_database)
        execute(verify_rabbit_node_hostname)
        execute(allow_rabbitmq_port)
        execute(rabbitmq_env)
        execute(config_rabbitmq)
        execute("stop_rabbitmq_and_set_cookie", rabbitmq_cluster_uuid)
        execute(start_rabbitmq)
        # adding sleep to workaround rabbitmq bug 26370 prevent "rabbitmqctl cluster_status" from breaking the database, this is seen in ci
        time.sleep(60)
        # execute(rabbitmqctl_stop_app)
        # execute(rabbitmqctl_reset)
        # execute("rabbitmqctl_start_app_node", env.roledefs['rabbit'][0])
        # execute(add_node_to_rabbitmq_cluster)
        # execute(rabbitmqctl_start_app)
        if role is "openstack" and get_openstack_internal_vip() or role is "cfgm" and get_contrail_internal_vip():
            execute("set_ha_policy_in_rabbitmq")
            execute("set_tcp_keepalive")
            execute("set_tcp_keepalive_on_compute")
        result = execute(verify_cluster_status)
        if False in result.values():
            print "Unable to setup RabbitMQ cluster in role[%s]...." % role
            exit(1)
Esempio n. 20
0
def join_rabbitmq_cluster(new_ctrl_host):
    """ Task to join a new rabbit server into an existing cluster """
    # Provision rabbitmq cluster in cfgm role nodes.
    amqp_roles = ['cfgm']
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        #Provision rabbitmq cluster in openstack role nodes aswell.
        amqp_roles.append('openstack')
    for role in amqp_roles:
        env.roledefs['rabbit'] = env.roledefs[role]

        # copy the erlang cookie from one of the other nodes.
        rabbitmq_cluster_uuid = None
        for host_string in env.roledefs['rabbit']:
            with settings(host_string=host_string, warn_only=True):
                if host_string != new_ctrl_host and\
                   sudo('ls /var/lib/rabbitmq/.erlang.cookie').succeeded:
                    rabbitmq_cluster_uuid = \
                        sudo('cat /var/lib/rabbitmq/.erlang.cookie')
                    break;
        if rabbitmq_cluster_uuid is None:
            raise RuntimeError("Not able to get the Erlang cookie from the cluster nodes")

        execute(listen_at_supervisor_support_port_node, new_ctrl_host)
        execute(remove_mnesia_database_node, new_ctrl_host)
        execute(verify_rabbit_node_hostname)
        execute(allow_rabbitmq_port_node, new_ctrl_host)
        execute(rabbitmq_env)
        execute(config_rabbitmq)
        execute('stop_rabbitmq_and_set_cookie_node', rabbitmq_cluster_uuid, new_ctrl_host)
        execute('start_rabbitmq_node', new_ctrl_host)
        # adding sleep to workaround rabbitmq bug 26370 prevent
        # "rabbitmqctl cluster_status" from breaking the database,
        # this is seen in ci
        time.sleep(30)
        if (role is 'openstack' and get_openstack_internal_vip() or
            role is 'cfgm' and get_contrail_internal_vip()):
            execute('set_ha_policy_in_rabbitmq')
            execute('set_tcp_keepalive')

        result = execute(verify_cluster_status)
        if False in result.values():
            print "Unable to setup RabbitMQ cluster in role[%s]...." % role
            exit(1)
Esempio n. 21
0
def setup_cmon_schema():
    """Task to configure cmon schema in the openstack nodes to monitor galera cluster"""
    if len(env.roledefs['openstack']) <= 1:
        print "Single Openstack cluster, skipping cmon schema  setup."
        return

    openstack_host_list = [get_control_host_string(openstack_host)\
                           for openstack_host in env.roledefs['openstack']]
    galera_ip_list = [hstr_to_ip(galera_host)\
                      for galera_host in openstack_host_list]
    internal_vip = get_openstack_internal_vip()

    mysql_token = run("cat /etc/contrail/mysql.token")
    pdist = detect_ostype()
    if pdist in ['Ubuntu']:
        mysql_svc = 'mysql'
    elif pdist in ['centos', 'redhat']:
        mysql_svc = 'mysqld'
    # Create cmon schema
    run('mysql -u root -p%s -e "CREATE SCHEMA IF NOT EXISTS cmon"' % mysql_token)
    run('mysql -u root -p%s < /usr/local/cmon/share/cmon/cmon_db.sql' % mysql_token)
    run('mysql -u root -p%s < /usr/local/cmon/share/cmon/cmon_data.sql' % mysql_token)

    # insert static data
    run('mysql -u root -p%s -e "use cmon; insert into cluster(type) VALUES (\'galera\')"' % mysql_token)

    host_list = galera_ip_list + ['localhost', '127.0.0.1', internal_vip]
    # Create cmon user
    for host in host_list:
        mysql_cmon_user_cmd = 'mysql -u root -p%s -e "CREATE USER \'cmon\'@\'%s\' IDENTIFIED BY \'cmon\'"' % (
                               mysql_token, host)
        with settings(hide('everything'),warn_only=True):
            run(mysql_cmon_user_cmd)

    mysql_cmd =  "mysql -uroot -p%s -e" % mysql_token
    # Grant privilages for cmon user.
    for host in host_list:
        run('%s "GRANT ALL PRIVILEGES on *.* TO cmon@%s IDENTIFIED BY \'cmon\' WITH GRANT OPTION"' %
               (mysql_cmd, host))
    # Restarting mysql in all openstack nodes
    for host_string in env.roledefs['openstack']:
        with settings(host_string=host_string):
            run("service %s restart" % mysql_svc)
Esempio n. 22
0
def setup_keystone_ssl_certs_node(*nodes):
    default_certfile = '/etc/keystone/ssl/certs/keystone.pem'
    default_keyfile = '/etc/keystone/ssl/private/keystone.key'
    default_cafile = '/etc/keystone/ssl/certs/keystone_ca.pem'
    ssl_certs = ((get_keystone_certfile(), default_certfile),
                 (get_keystone_keyfile(), default_keyfile),
                 (get_keystone_cafile(), default_cafile))
    index = env.roledefs['openstack'].index(env.host_string) + 1
    for node in nodes:
        with settings(host_string=node, password=get_env_passwords(node)):
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    # Clear old certificate
                    sudo('rm -f %s' % ssl_cert)
            for ssl_cert, default in ssl_certs:
                if ssl_cert == default:
                    openstack_host = env.roledefs['openstack'][0]
                    if index == 1:
                        if not exists(ssl_cert, use_sudo=True):
                            print "Creating keystone SSL certs in first openstack node"
                            sudo('create-keystone-ssl-certs.sh %s' % (
                                    get_openstack_internal_vip() or hstr_to_ip(openstack_host)))
                    else:
                        with settings(host_string=openstack_host,
                                      password=get_env_passwords(openstack_host)):
                            while not exists(ssl_cert, use_sudo=True):
                                print "Wait for SSL certs to be created in first openstack"
                                sleep(0.1)
                            print "Get SSL cert(%s) from first openstack" % ssl_cert
                            tmp_fname = os.path.join('/tmp', os.path.basename(ssl_cert))
                            get_as_sudo(ssl_cert, tmp_fname)
                        print "Copy to this(%s) openstack node" % env.host_string 
                        put(tmp_fname, ssl_cert, use_sudo=True)
                        os.remove(tmp_fname)
                elif os.path.isfile(ssl_cert): 
                    print "Certificate (%s) exists locally" % ssl_cert
                    put(ssl_cert, default, use_sudo=True)
                elif exists(ssl_cert, use_sudo=True): 
                    print "Certificate (%s) exists in openstack node" % ssl_cert
                    pass
                else:
                    raise RuntimeError("%s doesn't exists locally or in openstack node")
                sudo("chown -R keystone:keystone /etc/keystone/ssl")
Esempio n. 23
0
def setup_ha():
    execute('pre_check')

    if get_contrail_internal_vip():
        print "Contrail HA setup, provisioning contrail HA."
        execute('setup_keepalived')
        execute('fixup_restart_haproxy_in_collector')

    if get_openstack_internal_vip():
        print "Multi Openstack setup, provisioning openstack HA."
        execute('setup_galera_cluster')
        execute('fix_wsrep_cluster_address')
        execute('setup_cmon_schema')
        execute('fix_restart_xinetd_conf')
        execute('fixup_restart_haproxy_in_openstack')
        execute('setup_glance_images_loc')
        execute('fix_memcache_conf')
        execute('tune_tcp')
        execute('fix_cmon_param_and_add_keys_to_compute')
        execute('create_and_copy_service_token')
Esempio n. 24
0
def setup_ha():
    execute('pre_check')

    if get_contrail_internal_vip():
        print "Contrail HA setup, provisioning contrail HA."
        execute('setup_keepalived')
        execute('fixup_restart_haproxy_in_collector')

    if get_openstack_internal_vip():
        print "Multi Openstack setup, provisioning openstack HA."
        execute('setup_galera_cluster')
        execute('fix_wsrep_cluster_address')
        execute('setup_cmon_schema')
        execute('fix_restart_xinetd_conf')
        execute('fixup_restart_haproxy_in_openstack')
        execute('setup_glance_images_loc')
        execute('fix_memcache_conf')
        execute('tune_tcp')
        execute('fix_cmon_param_and_add_keys_to_compute')
        execute('create_and_copy_service_token')
Esempio n. 25
0
def setup_ha():
    execute("pre_check")

    if get_contrail_internal_vip():
        print "Contrail HA setup, provisioning contrail HA."
        execute("setup_keepalived")
        execute("fixup_restart_haproxy_in_collector")

    if get_openstack_internal_vip():
        print "Multi Openstack setup, provisioning openstack HA."
        execute("setup_galera_cluster")
        execute("fix_wsrep_cluster_address")
        execute("setup_cmon_schema")
        execute("fix_restart_xinetd_conf")
        execute("fixup_restart_haproxy_in_openstack")
        execute("setup_glance_images_loc")
        execute("fix_memcache_conf")
        execute("tune_tcp")
        execute("fix_cmon_param_and_add_keys_to_compute")
        execute("create_and_copy_service_token")
Esempio n. 26
0
def setup_keepalived_node(role):
    """Task to provision VIP for node with keepalived"""
    mgmt_ip = hstr_to_ip(env.host_string)
    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)
    openstack_host_password = env.passwords[env.host_string]
    
    if (getattr(env, 'openstack_admin_password', None)):
        openstack_admin_password = env.openstack_admin_password
    else:
        openstack_admin_password = '******'
        
    internal_vip = get_openstack_internal_vip()
    external_vip = get_openstack_external_vip()
    if role == 'cfgm':
        internal_vip = get_contrail_internal_vip()
        external_vip = get_contrail_external_vip()
    keepalived_host_list = [get_control_host_string(keepalived_host)\
                           for keepalived_host in env.roledefs[role]]
    myindex = keepalived_host_list.index(self_host)
    if myindex >= 1:
        # Wait for VIP to be assiciated to MASTER
        with settings(host_string=env.roledefs[role][0], warn_only=True):
            while run("ip addr | grep %s" % internal_vip).failed:
                sleep(2)
                print "Waiting for VIP to be associated to MASTER VRRP."
                continue
 
    with cd(INSTALLER_DIR):
        cmd = "PASSWORD=%s ADMIN_TOKEN=%s python setup-vnc-keepalived.py\
               --self_ip %s --internal_vip %s --mgmt_self_ip %s\
               --self_index %d --num_nodes %d --role %s" % (openstack_host_password,
               openstack_admin_password, self_ip, internal_vip, mgmt_ip,
               (keepalived_host_list.index(self_host) + 1), len(env.roledefs[role]),
               role)
        if external_vip:
             cmd += ' --external_vip %s' % external_vip
        run(cmd)
Esempio n. 27
0
def setup_cmon_schema():
    """Task to configure cmon schema in the openstack nodes to monitor galera cluster"""
    if len(env.roledefs["openstack"]) <= 1:
        print "Single Openstack cluster, skipping cmon schema  setup."
        return

    openstack_host_list = [get_control_host_string(openstack_host) for openstack_host in env.roledefs["openstack"]]
    galera_ip_list = [hstr_to_ip(galera_host) for galera_host in openstack_host_list]
    internal_vip = get_openstack_internal_vip()

    mysql_token = sudo("cat /etc/contrail/mysql.token")
    pdist = detect_ostype()
    if pdist in ["ubuntu"]:
        mysql_svc = "mysql"
    elif pdist in ["centos", "redhat"]:
        mysql_svc = "mysqld"
    # Create cmon schema
    sudo('mysql -u root -p%s -e "CREATE SCHEMA IF NOT EXISTS cmon"' % mysql_token)
    sudo("mysql -u root -p%s < /usr/local/cmon/share/cmon/cmon_db.sql" % mysql_token)
    sudo("mysql -u root -p%s < /usr/local/cmon/share/cmon/cmon_data.sql" % mysql_token)

    # insert static data
    sudo("mysql -u root -p%s -e \"use cmon; insert into cluster(type) VALUES ('galera')\"" % mysql_token)

    host_list = galera_ip_list + ["localhost", "127.0.0.1", internal_vip]
    # Create cmon user
    for host in host_list:
        mysql_cmon_user_cmd = "mysql -u root -p%s -e \"CREATE USER 'cmon'@'%s' IDENTIFIED BY 'cmon'\"" % (
            mysql_token,
            host,
        )
        with settings(hide("everything"), warn_only=True):
            sudo(mysql_cmon_user_cmd)

    mysql_cmd = "mysql -uroot -p%s -e" % mysql_token
    # Grant privilages for cmon user.
    for host in host_list:
        sudo("%s \"GRANT ALL PRIVILEGES on *.* TO cmon@%s IDENTIFIED BY 'cmon' WITH GRANT OPTION\"" % (mysql_cmd, host))
Esempio n. 28
0
def setup_galera_cluster():
    """Task to cluster the openstack nodes with galera"""
    if len(env.roledefs['openstack']) <= 1:
        print "Single Openstack cluster, skipping galera cluster setup."
        return

    if env.roledefs['openstack'].index(env.host_string) == 0:
        execute('setup_passwordless_ssh', *env.roledefs['openstack'])
    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)

    openstack_host_list = [get_control_host_string(openstack_host)\
                           for openstack_host in env.roledefs['openstack']]
    galera_ip_list = [hstr_to_ip(galera_host)\
                      for galera_host in openstack_host_list]
    keystone_ip = get_keystone_ip()
    internal_vip = get_openstack_internal_vip()

    with cd(INSTALLER_DIR):
        sudo("setup-vnc-galera\
            --self_ip %s --keystone_ip %s --galera_ip_list %s\
            --internal_vip %s --openstack_index %d" %
             (self_ip, keystone_ip, ' '.join(galera_ip_list), internal_vip,
              (openstack_host_list.index(self_host) + 1)))
Esempio n. 29
0
def setup_galera_cluster():
    """Task to cluster the openstack nodes with galera"""
    if len(env.roledefs['openstack']) <= 1:
        print "Single Openstack cluster, skipping galera cluster setup."
        return

    if env.roledefs['openstack'].index(env.host_string) == 0:
        execute('setup_passwordless_ssh', *env.roledefs['openstack'])
    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)

    openstack_host_list = [get_control_host_string(openstack_host)\
                           for openstack_host in env.roledefs['openstack']]
    galera_ip_list = [hstr_to_ip(galera_host)\
                      for galera_host in openstack_host_list]
    keystone_ip = get_keystone_ip()
    internal_vip = get_openstack_internal_vip()

    with cd(INSTALLER_DIR):
        sudo("setup-vnc-galera\
            --self_ip %s --keystone_ip %s --galera_ip_list %s\
            --internal_vip %s --openstack_index %d" % ( self_ip, keystone_ip,
                ' '.join(galera_ip_list), internal_vip,
                (openstack_host_list.index(self_host) + 1)))
Esempio n. 30
0
def join_ha_cluster(new_ctrl_host):
    execute('pre_check')

    if get_contrail_internal_vip():
        print "Contrail HA setup - Adding %s to existing \
               cluster", new_ctrl_host
        execute('join_keepalived_cluster', new_ctrl_host)
        execute('fixup_restart_haproxy_in_collector')

    if get_openstack_internal_vip():
        if new_ctrl_host in env.roledefs['openstack']:
            print "Multi Openstack setup, Adding %s to the existing \
                   OpenStack cluster", new_ctrl_host
            execute('join_galera_cluster', new_ctrl_host)
            execute('setup_cmon_schema_node', new_ctrl_host)
            execute('fix_restart_xinetd_conf_node', new_ctrl_host)
            execute('fixup_restart_haproxy_in_openstack')
            execute('start_openstack')
            execute('fix_memcache_conf_node', new_ctrl_host)
            execute('tune_tcp_node', new_ctrl_host)
            execute('fix_cmon_param_and_add_keys_to_compute')
            execute('create_and_copy_service_token')
            execute('join_rabbitmq_cluster', new_ctrl_host)
            execute('increase_limits_node', new_ctrl_host)
            execute('join_orchestrator', new_ctrl_host)

        if new_ctrl_host in env.roledefs['database']:
            execute('setup_database_node', new_ctrl_host)
            execute('fix_zookeeper_config')
            execute('restart_all_zookeeper_servers')

        if new_ctrl_host in env.roledefs['cfgm']:
            execute('setup_cfgm_node', new_ctrl_host)
            execute('verify_cfgm')
            execute('fix_cfgm_config')

        if new_ctrl_host in env.roledefs['control']:
            execute('setup_control_node', new_ctrl_host)
            execute('verify_control')

        if new_ctrl_host in env.roledefs['collector']:
            execute('setup_collector_node', new_ctrl_host)
            execute('fix_collector_config')
            execute('verify_collector')

        if new_ctrl_host in env.roledefs['webui']:
            execute('setup_webui_node', new_ctrl_host)
            execute('fix_webui_config')
            execute('verify_webui')

        if new_ctrl_host in env.roledefs['cfgm']:
            execute('prov_config_node', new_ctrl_host)
            execute('prov_metadata_services')
            execute('prov_encap_type')

        if new_ctrl_host in env.roledefs['database']:
            execute('prov_database_node', new_ctrl_host)

        if new_ctrl_host in env.roledefs['collector']:
            execute('prov_analytics_node', new_ctrl_host)

        if new_ctrl_host in env.roledefs['control']:
            execute('prov_control_bgp')
            execute('prov_external_bgp')

        execute('setup_remote_syslog')
Esempio n. 31
0
def join_keepalived_cluster(new_ctrl_host):
    """Task to configure a new node into an existing keepalived cluster"""
    if get_openstack_internal_vip():
        execute('join_openstack_keepalived_node', new_ctrl_host)
    # A separate contrail keepalived instance is joined only when the
    # contrail VIP differs from the openstack VIP.
    contrail_vip_differs = (get_contrail_internal_vip() !=
                            get_openstack_internal_vip())
    if contrail_vip_differs:
        execute('join_contrail_keepalived_node', new_ctrl_host)
Esempio n. 32
0
def setup_keepalived():
    """Task to provision VIP for openstack/cfgm nodes with keepalived"""
    if get_openstack_internal_vip():
        execute('setup_openstack_keepalived')
    # A separate contrail keepalived setup is needed only when the
    # contrail VIP differs from the openstack VIP.
    contrail_vip_differs = (get_contrail_internal_vip() !=
                            get_openstack_internal_vip())
    if contrail_vip_differs:
        execute('setup_contrail_keepalived')
Esempio n. 33
0
def get_openstack_pkgs():
    """Return the list of contrail openstack packages to install."""
    pkgs = ['contrail-openstack']
    # The HA add-on package is needed only for a multi-node openstack
    # deployment with an internal VIP configured.
    multi_node = len(env.roledefs['openstack']) > 1
    if multi_node and get_openstack_internal_vip():
        pkgs.append('contrail-openstack-ha')
    return pkgs
Esempio n. 34
0
def configure_test_env(contrail_fab_path='/opt/contrail/utils',
                       test_dir='/contrail-test'):
    """
    Configure test environment by creating sanity_params.ini and sanity_testbed.json files
    """
    print "Configuring test environment"
    sys.path.insert(0, contrail_fab_path)
    from fabfile.testbeds import testbed
    from fabfile.utils.host import get_openstack_internal_vip, \
        get_control_host_string, get_authserver_ip, get_admin_tenant_name, \
        get_authserver_port, get_env_passwords, get_authserver_credentials, \
        get_vcenter_ip, get_vcenter_port, get_vcenter_username, \
        get_vcenter_password, get_vcenter_datacenter, get_vcenter_compute, \
        get_authserver_protocol, get_region_name, get_contrail_internal_vip, \
        get_openstack_external_vip, get_contrail_external_vip
    from fabfile.utils.multitenancy import get_mt_enable
    from fabfile.utils.interface import get_data_ip
    from fabfile.tasks.install import update_config_option, update_js_config

    cfgm_host = env.roledefs['cfgm'][0]

    auth_protocol = get_authserver_protocol()
    auth_server_ip = get_authserver_ip()
    auth_server_port = get_authserver_port()
    with settings(warn_only=True), hide('everything'):
        with lcd(contrail_fab_path):
            if local('git branch').succeeded:
                fab_revision = local('git log --format="%H" -n 1',
                                     capture=True)
            else:
                with settings(host_string=cfgm_host), hide('everything'):
                    fab_revision = run(
                        'cat /opt/contrail/contrail_packages/VERSION')
        with lcd(test_dir):
            if local('git branch').succeeded:
                revision = local('git log --format="%H" -n 1', capture=True)
            else:
                with settings(host_string=cfgm_host), hide('everything'):
                    revision = run(
                        'cat /opt/contrail/contrail_packages/VERSION')

    sanity_testbed_dict = {
        'hosts': [],
        'vgw': [],
        'esxi_vms': [],
        'vcenter_servers': [],
        'hosts_ipmi': [],
        'tor': [],
    }

    sample_ini_file = test_dir + '/' + 'sanity_params.ini.sample'
    with open(sample_ini_file, 'r') as fd_sample_ini:
        contents_sample_ini = fd_sample_ini.read()
    sanity_ini_templ = string.Template(contents_sample_ini)

    if env.get('orchestrator', 'openstack') != 'vcenter':
        with settings(
                host_string=env.roledefs['openstack'][0]), hide('everything'):
            openstack_host_name = run("hostname")

    with settings(host_string=env.roledefs['cfgm'][0]), hide('everything'):
        cfgm_host_name = run("hostname")

    control_host_names = []
    for control_host in env.roledefs['control']:
        with settings(host_string=control_host), hide('everything'):
            host_name = run("hostname")
            control_host_names.append(host_name)

    cassandra_host_names = []
    if 'database' in env.roledefs.keys():
        for cassandra_host in env.roledefs['database']:
            with settings(host_string=cassandra_host), hide('everything'):
                host_name = run("hostname")
                cassandra_host_names.append(host_name)

    internal_vip = get_openstack_internal_vip()
    external_vip = get_openstack_external_vip()
    contrail_internal_vip = get_contrail_internal_vip()
    contrail_external_vip = get_contrail_external_vip()
    multi_role_test = False
    for host_string in env.roledefs['all']:
        if host_string in env.roledefs.get('test', []):
            for role in env.roledefs.iterkeys():
                if role in ['test', 'all']:
                    continue
                if host_string in env.roledefs.get(role, []):
                    multi_role_test = True
                    break
            if not multi_role_test:
                continue
        host_ip = host_string.split('@')[1]
        with settings(host_string=host_string), hide('everything'):
            host_name = run("hostname")

        host_dict = {}

        host_dict['ip'] = host_ip
        host_dict['data-ip'] = get_data_ip(host_string)[0]
        if host_dict['data-ip'] == host_string.split('@')[1]:
            host_dict['data-ip'] = get_data_ip(host_string)[0]
        host_dict['control-ip'] = get_control_host_string(host_string).split(
            '@')[1]

        host_dict['name'] = host_name
        host_dict['username'] = host_string.split('@')[0]
        host_dict['password'] = get_env_passwords(host_string)
        host_dict['roles'] = []

        if host_string in env.roledefs['openstack']:
            role_dict = {
                'type': 'openstack',
                'params': {
                    'cfgm': cfgm_host_name
                }
            }
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['cfgm']:
            role_dict = {
                'type': 'cfgm',
                'params': {
                    'collector': host_name,
                    'cassandra': ' '.join(cassandra_host_names)
                }
            }

            if env.get('orchestrator', 'openstack') != 'vcenter':
                role_dict['openstack'] = openstack_host_name
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['control']:
            role_dict = {
                'type': 'bgp',
                'params': {
                    'collector': cfgm_host_name,
                    'cfgm': cfgm_host_name
                }
            }
            host_dict['roles'].append(role_dict)

        if 'database' in env.roledefs.keys(
        ) and host_string in env.roledefs['database']:
            role_dict = {
                'type': 'database',
                'params': {
                    'cassandra': ' '.join(cassandra_host_names)
                }
            }
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['compute']:
            role_dict = {
                'type': 'compute',
                'params': {
                    'collector': cfgm_host_name,
                    'cfgm': cfgm_host_name
                }
            }
            role_dict['params']['bgp'] = []
            if len(env.roledefs['control']) == 1:
                role_dict['params']['bgp'] = control_host_names
            else:
                for control_node in control_host_names:
                    role_dict['params']['bgp'].append(control_node)
            # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))])
            host_dict['roles'].append(role_dict)

        if 'collector' in env.roledefs.keys(
        ) and host_string in env.roledefs['collector']:
            role_dict = {
                'type': 'collector',
                'params': {
                    'cassandra': ' '.join(cassandra_host_names)
                }
            }
            host_dict['roles'].append(role_dict)

        if 'webui' in env.roledefs.keys(
        ) and host_string in env.roledefs['webui']:
            role_dict = {'type': 'webui', 'params': {'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)

        sanity_testbed_dict['hosts'].append(host_dict)
    if env.has_key('vgw'): sanity_testbed_dict['vgw'].append(env.vgw)

    # Read ToR config
    sanity_tor_dict = {}
    if env.has_key('tor_agent'):
        sanity_testbed_dict['tor_agent'] = env.tor_agent

    # Read any tor-host config
    if env.has_key('tor_hosts'):
        sanity_testbed_dict['tor_hosts'] = env.tor_hosts

    if env.has_key('xmpp_auth_enable'):
        sanity_testbed_dict['xmpp_auth_enable'] = env.xmpp_auth_enable
    if env.has_key('xmpp_dns_auth_enable'):
        sanity_testbed_dict['xmpp_dns_auth_enable'] = env.xmpp_dns_auth_enable

    # Read any MX config (as physical_router )
    if env.has_key('physical_routers'):
        sanity_testbed_dict['physical_routers'] = env.physical_routers

    esxi_hosts = getattr(testbed, 'esxi_hosts', None)
    if esxi_hosts:
        for esxi in esxi_hosts:
            host_dict = {}
            host_dict['ip'] = esxi_hosts[esxi]['ip']
            host_dict['data-ip'] = host_dict['ip']
            host_dict['control-ip'] = host_dict['ip']
            host_dict['name'] = esxi
            host_dict['username'] = esxi_hosts[esxi]['username']
            host_dict['password'] = esxi_hosts[esxi]['password']
            #Its used for vcenter only mode provosioning for contrail-vm
            #Its not needed for vcenter_gateway mode, hence might not be there in testbed.py
            if 'contrail_vm' in esxi_hosts[esxi]:
                host_dict['contrail_vm'] = esxi_hosts[esxi]['contrail_vm'][
                    'host']
            host_dict['roles'] = []
            sanity_testbed_dict['hosts'].append(host_dict)
            sanity_testbed_dict['esxi_vms'].append(host_dict)

    vcenter_servers = env.get('vcenter_servers')
    if vcenter_servers:
        for vcenter in vcenter_servers:
            host_dict = {}
            host_dict['server'] = vcenter_servers[vcenter]['server']
            host_dict['port'] = vcenter_servers[vcenter]['port']
            host_dict['username'] = vcenter_servers[vcenter]['username']
            host_dict['password'] = vcenter_servers[vcenter]['password']
            host_dict['datacenter'] = vcenter_servers[vcenter]['datacenter']
            host_dict['auth'] = vcenter_servers[vcenter]['auth']
            host_dict['cluster'] = vcenter_servers[vcenter]['cluster']
            host_dict['dv_switch'] = vcenter_servers[vcenter]['dv_switch'][
                'dv_switch_name']
            #Mostly we do not use the below info for vcenter sanity tests.
            #Its used for vcenter only mode provosioning for contrail-vm
            #Its not needed for vcenter_gateway mode, hence might not be there in testbed.py
            if 'dv_port_group' in vcenter_servers[vcenter]:
                host_dict['dv_port_group'] = vcenter_servers[vcenter][
                    'dv_port_group']['dv_portgroup_name']
            sanity_testbed_dict['vcenter_servers'].append(host_dict)

    #get other orchestrators (vcenter etc) info if any
    slave_orch = None
    if env.has_key('other_orchestrators'):
        sanity_testbed_dict['other_orchestrators'] = env.other_orchestrators
        for k, v in env.other_orchestrators.items():
            if v['type'] == 'vcenter':
                slave_orch = 'vcenter'

    # get host ipmi list
    if env.has_key('hosts_ipmi'):
        sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi)

    if not getattr(env, 'test', None):
        env.test = {}

    # generate json file and copy to cfgm
    sanity_testbed_json = json.dumps(sanity_testbed_dict)
    stack_user = env.test.get('stack_user', os.getenv('STACK_USER') or '')
    stack_password = env.test.get('stack_password',
                                  os.getenv('STACK_PASSWORD') or '')
    stack_tenant = env.test.get('stack_tenant',
                                os.getenv('STACK_TENANT') or '')
    tenant_isolation = env.test.get('tenant_isolation',
                                    os.getenv('TENANT_ISOLATION') or '')

    stop_on_fail = env.get('stop_on_fail', False)
    mail_to = env.test.get('mail_to', os.getenv('MAIL_TO') or '')
    log_scenario = env.get('log_scenario', 'Sanity')
    stack_region_name = get_region_name()
    admin_user, admin_password = get_authserver_credentials()
    admin_tenant = get_admin_tenant_name()
    # Few hardcoded variables for sanity environment
    # can be removed once we move to python3 and configparser
    stack_domain = env.get('stack_domain', 'default-domain')
    webserver_host = env.test.get('webserver_host',
                                  os.getenv('WEBSERVER_HOST') or '')
    webserver_user = env.test.get('webserver_user',
                                  os.getenv('WEBSERVER_USER') or '')
    webserver_password = env.test.get('webserver_password',
                                      os.getenv('WEBSERVER_PASSWORD') or '')
    webserver_log_path = env.test.get(
        'webserver_log_path',
        os.getenv('WEBSERVER_LOG_PATH') or '/var/www/contrail-test-ci/logs/')
    webserver_report_path = env.test.get(
        'webserver_report_path',
        os.getenv('WEBSERVER_REPORT_PATH')
        or '/var/www/contrail-test-ci/reports/')
    webroot = env.test.get('webroot',
                           os.getenv('WEBROOT') or 'contrail-test-ci')
    mail_server = env.test.get('mail_server', os.getenv('MAIL_SERVER') or '')
    mail_port = env.test.get('mail_port', os.getenv('MAIL_PORT') or '25')
    fip_pool_name = env.test.get(
        'fip_pool_name',
        os.getenv('FIP_POOL_NAME') or 'floating-ip-pool')
    public_virtual_network = env.test.get(
        'public_virtual_network',
        os.getenv('PUBLIC_VIRTUAL_NETWORK') or 'public')
    public_tenant_name = env.test.get(
        'public_tenant_name',
        os.getenv('PUBLIC_TENANT_NAME') or 'admin')
    fixture_cleanup = env.test.get('fixture_cleanup',
                                   os.getenv('FIXTURE_CLEANUP') or 'yes')
    generate_html_report = env.test.get(
        'generate_html_report',
        os.getenv('GENERATE_HTML_REPORT') or 'True')
    keypair_name = env.test.get('keypair_name',
                                os.getenv('KEYPAIR_NAME') or 'contrail_key')
    mail_sender = env.test.get(
        'mail_sender',
        os.getenv('MAIL_SENDER') or '*****@*****.**')
    discovery_ip = env.test.get('discovery_ip',
                                os.getenv('DISCOVERY_IP') or '')
    config_api_ip = env.test.get('config_api_ip',
                                 os.getenv('CONFIG_API_IP') or '')
    analytics_api_ip = env.test.get('analytics_api_ip',
                                    os.getenv('ANALYTICS_API_IP') or '')
    discovery_port = env.test.get('discovery_port',
                                  os.getenv('DISCOVERY_PORT') or '')
    config_api_port = env.test.get('config_api_port',
                                   os.getenv('CONFIG_API_PORT') or '')
    analytics_api_port = env.test.get('analytics_api_port',
                                      os.getenv('ANALYTICS_API_PORT') or '')
    control_port = env.test.get('control_port',
                                os.getenv('CONTROL_PORT') or '')
    dns_port = env.test.get('dns_port', os.getenv('DNS_PORT') or '')
    agent_port = env.test.get('agent_port', os.getenv('AGENT_PORT') or '')
    user_isolation = env.test.get('user_isolation',
                                  bool(os.getenv('USER_ISOLATION') or True))

    use_devicemanager_for_md5 = getattr(testbed, 'use_devicemanager_for_md5',
                                        False)
    orch = getattr(env, 'orchestrator', 'openstack')
    router_asn = getattr(testbed, 'router_asn', '')
    public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '')
    public_vn_subnet = getattr(testbed, 'public_vn_subnet', '')
    ext_routers = getattr(testbed, 'ext_routers', '')
    router_info = str(ext_routers)
    test_verify_on_setup = getattr(env, 'test_verify_on_setup', True)
    webui = getattr(testbed, 'webui', False)
    horizon = getattr(testbed, 'horizon', False)
    ui_config = getattr(testbed, 'ui_config', False)
    ui_browser = getattr(testbed, 'ui_browser', False)

    if not env.has_key('openstack'):
        env.openstack = {}
    if not env.has_key('cfgm'):
        env.cfgm = {}

    config_amqp_ip = env.openstack.get('amqp_host', '')
    if config_amqp_ip:
        config_amqp_ips = [config_amqp_ip]
    else:
        config_amqp_ips = []

    # If amqp details are in env.cfgm as well, use that
    config_amqp_port = env.cfgm.get('amqp_port', '5672')
    config_amqp_ips = env.cfgm.get('amqp_hosts', config_amqp_ips)

    key_filename = env.get('key_filename', '')
    pubkey_filename = env.get('pubkey_filename', '')

    vcenter_dc = ''
    if orch == 'vcenter' or slave_orch == 'vcenter':
        public_tenant_name = 'vCenter'

    if env.has_key('vcenter_servers'):
        if env.vcenter_servers:
            for k in env.vcenter_servers:
                vcenter_dc = env.vcenter_servers[k]['datacenter']

    sanity_params = sanity_ini_templ.safe_substitute({
        '__testbed_json_file__':
        'sanity_testbed.json',
        '__nova_keypair_name__':
        keypair_name,
        '__orch__':
        orch,
        '__admin_user__':
        admin_user,
        '__admin_password__':
        admin_password,
        '__admin_tenant__':
        admin_tenant,
        '__tenant_isolation__':
        tenant_isolation,
        '__stack_user__':
        stack_user,
        '__stack_password__':
        stack_password,
        '__auth_ip__':
        auth_server_ip,
        '__auth_port__':
        auth_server_port,
        '__auth_protocol__':
        auth_protocol,
        '__stack_region_name__':
        stack_region_name,
        '__stack_tenant__':
        stack_tenant,
        '__stack_domain__':
        stack_domain,
        '__multi_tenancy__':
        get_mt_enable(),
        '__address_family__':
        get_address_family(),
        '__log_scenario__':
        log_scenario,
        '__generate_html_report__':
        generate_html_report,
        '__fixture_cleanup__':
        fixture_cleanup,
        '__key_filename__':
        key_filename,
        '__pubkey_filename__':
        pubkey_filename,
        '__webserver__':
        webserver_host,
        '__webserver_user__':
        webserver_user,
        '__webserver_password__':
        webserver_password,
        '__webserver_log_dir__':
        webserver_log_path,
        '__webserver_report_dir__':
        webserver_report_path,
        '__webroot__':
        webroot,
        '__mail_server__':
        mail_server,
        '__mail_port__':
        mail_port,
        '__sender_mail_id__':
        mail_sender,
        '__receiver_mail_id__':
        mail_to,
        '__http_proxy__':
        env.get('http_proxy', ''),
        '__ui_browser__':
        ui_browser,
        '__ui_config__':
        ui_config,
        '__horizon__':
        horizon,
        '__webui__':
        webui,
        '__devstack__':
        False,
        '__public_vn_rtgt__':
        public_vn_rtgt,
        '__router_asn__':
        router_asn,
        '__router_name_ip_tuples__':
        router_info,
        '__public_vn_name__':
        fip_pool_name,
        '__public_virtual_network__':
        public_virtual_network,
        '__public_tenant_name__':
        public_tenant_name,
        '__public_vn_subnet__':
        public_vn_subnet,
        '__test_revision__':
        revision,
        '__fab_revision__':
        fab_revision,
        '__test_verify_on_setup__':
        test_verify_on_setup,
        '__stop_on_fail__':
        stop_on_fail,
        '__ha_setup__':
        getattr(testbed, 'ha_setup', ''),
        '__ipmi_username__':
        getattr(testbed, 'ipmi_username', ''),
        '__ipmi_password__':
        getattr(testbed, 'ipmi_password', ''),
        '__contrail_internal_vip__':
        contrail_internal_vip,
        '__contrail_external_vip__':
        contrail_external_vip,
        '__internal_vip__':
        internal_vip,
        '__external_vip__':
        external_vip,
        '__vcenter_dc__':
        vcenter_dc,
        '__vcenter_server__':
        get_vcenter_ip(),
        '__vcenter_port__':
        get_vcenter_port(),
        '__vcenter_username__':
        get_vcenter_username(),
        '__vcenter_password__':
        get_vcenter_password(),
        '__vcenter_datacenter__':
        get_vcenter_datacenter(),
        '__vcenter_compute__':
        get_vcenter_compute(),
        '__use_devicemanager_for_md5__':
        use_devicemanager_for_md5,
        '__discovery_port__':
        discovery_port,
        '__config_api_port__':
        config_api_port,
        '__analytics_api_port__':
        analytics_api_port,
        '__control_port__':
        control_port,
        '__dns_port__':
        dns_port,
        '__vrouter_agent_port__':
        agent_port,
        '__discovery_ip__':
        discovery_ip,
        '__config_api_ip__':
        config_api_ip,
        '__analytics_api_ip__':
        analytics_api_ip,
        '__user_isolation__':
        user_isolation,
        '__config_amqp_ips__':
        ','.join(config_amqp_ips),
        '__config_amqp_port__':
        config_amqp_port,
    })

    ini_file = test_dir + '/' + 'sanity_params.ini'
    testbed_json_file = test_dir + '/' + 'sanity_testbed.json'
    with open(ini_file, 'w') as ini:
        ini.write(sanity_params)

    with open(testbed_json_file, 'w') as tb:
        tb.write(sanity_testbed_json)

    # Create /etc/contrail/openstackrc
    if not os.path.exists('/etc/contrail'):
        os.makedirs('/etc/contrail')

    with open('/etc/contrail/openstackrc', 'w') as rc:
        rc.write("export OS_USERNAME=%s\n" % admin_user)
        rc.write("export OS_PASSWORD=%s\n" % admin_password)
        rc.write("export OS_TENANT_NAME=%s\n" % admin_tenant)
        rc.write("export OS_REGION_NAME=%s\n" % stack_region_name)
        rc.write("export OS_AUTH_URL=%s://%s:%s/v2.0\n" %
                 (auth_protocol, auth_server_ip, auth_server_port))
        rc.write("export OS_NO_CACHE=1\n")

    # Write vnc_api_lib.ini - this is required for vnc_api to connect to keystone
    config = ConfigParser.ConfigParser()
    config.optionxform = str
    vnc_api_ini = '/etc/contrail/vnc_api_lib.ini'
    if os.path.exists(vnc_api_ini):
        config.read(vnc_api_ini)

    if 'auth' not in config.sections():
        config.add_section('auth')

    config.set('auth', 'AUTHN_TYPE', 'keystone')
    config.set('auth', 'AUTHN_PROTOCOL', auth_protocol)
    config.set('auth', 'AUTHN_SERVER', auth_server_ip)
    config.set('auth', 'AUTHN_PORT', auth_server_port)
    config.set('auth', 'AUTHN_URL', '/v2.0/tokens')

    with open(vnc_api_ini, 'w') as f:
        config.write(f)

    # If webui = True, in testbed, setup webui for sanity
    if webui:
        update_config_option('openstack', '/etc/keystone/keystone.conf',
                             'token', 'expiration', '86400', 'keystone')
        update_js_config('openstack', '/etc/contrail/config.global.js',
                         'contrail-webui')
# Esempio n. 35
def configure_test_env(contrail_fab_path='/opt/contrail/utils', test_dir='/contrail-test'):
    """
    Configure test environment by creating sanity_params.ini and sanity_testbed.json files
    """
    print "Configuring test environment"
    sys.path.insert(0, contrail_fab_path)
    from fabfile.config import testbed
    from fabfile.utils.host import get_openstack_internal_vip, \
        get_control_host_string, get_authserver_ip, get_admin_tenant_name, \
        get_authserver_port, get_env_passwords, get_authserver_credentials, \
        get_vcenter_ip, get_vcenter_port, get_vcenter_username, \
        get_vcenter_password, get_vcenter_datacenter, get_vcenter_compute, \
        get_authserver_protocol, get_region_name, get_contrail_internal_vip, \
        get_openstack_external_vip, get_contrail_external_vip, \
        get_apiserver_protocol, get_apiserver_certfile, get_apiserver_keyfile, \
        get_apiserver_cafile, get_keystone_insecure_flag, \
        get_apiserver_insecure_flag, get_keystone_certfile, get_keystone_keyfile, \
        get_keystone_cafile, get_keystone_version
    from fabfile.utils.multitenancy import get_mt_enable
    from fabfile.utils.interface import get_data_ip
    from fabfile.tasks.install import update_config_option, update_js_config
    from fabfile.utils.fabos import get_as_sudo
    logger = contrail_logging.getLogger(__name__)

    def validate_and_copy_file(filename, source_host):
        # Copy *filename* from *source_host* to the identical local path.
        # Returns the path when the remote file exists, "" otherwise.
        with settings(host_string='%s' % (source_host),
                      warn_only=True, abort_on_prompts=False):
            if not exists(filename):
                return ""
            local_dir = os.path.dirname(filename)
            if not os.path.exists(local_dir):
                os.makedirs(local_dir)
            # get_as_sudo fetches the remote file with sudo privileges
            get_as_sudo(filename, filename)
            return filename

    cfgm_host = env.roledefs['cfgm'][0]
    auth_protocol = get_authserver_protocol()
    try:
        auth_server_ip = get_authserver_ip()
    except Exception:
        auth_server_ip = None
    auth_server_port = get_authserver_port()
    api_auth_protocol = get_apiserver_protocol()

    if api_auth_protocol == 'https':
        api_certfile = validate_and_copy_file(get_apiserver_certfile(), cfgm_host)
        api_keyfile = validate_and_copy_file(get_apiserver_keyfile(), cfgm_host)
        api_cafile = validate_and_copy_file(get_apiserver_cafile(), cfgm_host)
        api_insecure_flag = get_apiserver_insecure_flag()
    else:
       api_certfile = ""
       api_keyfile = ""
       api_cafile = ""
       api_insecure_flag = True

    cert_dir = os.path.dirname(api_certfile)
    if auth_protocol == 'https':
        keystone_cafile = validate_and_copy_file(cert_dir + '/' +\
                          os.path.basename(get_keystone_cafile()), cfgm_host)
        keystone_certfile = validate_and_copy_file(cert_dir + '/' +\
                          os.path.basename(get_keystone_certfile()), cfgm_host)
        keystone_keyfile = keystone_certfile
        keystone_insecure_flag = istrue(os.getenv('OS_INSECURE', \
                                 get_keystone_insecure_flag()))
    else:
        keystone_certfile = ""
        keystone_keyfile = ""
        keystone_cafile = ""
        keystone_insecure_flag = True

    with settings(warn_only=True), hide('everything'):
        with lcd(contrail_fab_path):
            if local('git branch').succeeded:
                fab_revision = local('git log --format="%H" -n 1', capture=True)
            else:
                with settings(host_string=cfgm_host), hide('everything'):
                   fab_revision = run('cat /opt/contrail/contrail_packages/VERSION')
        with lcd(test_dir):
            if local('git branch').succeeded:
                revision = local('git log --format="%H" -n 1', capture=True)
            else:
                with settings(host_string=cfgm_host), hide('everything'):
                    revision = run('cat /opt/contrail/contrail_packages/VERSION')

    sanity_testbed_dict = {
        'hosts': [],
        'vgw': [],
        'esxi_vms':[],
        'vcenter_servers':[],
        'hosts_ipmi': [],
        'tor':[],
        'sriov':[],
        'dpdk':[],
        'ns_agilio_vrouter':[],
    }

    sample_ini_file = test_dir + '/' + 'sanity_params.ini.sample'
    with open(sample_ini_file, 'r') as fd_sample_ini:
       contents_sample_ini = fd_sample_ini.read()
    sanity_ini_templ = string.Template(contents_sample_ini)

    if not getattr(env, 'test', None):
        env.test={}
    containers = env.test.get('containers')
    traffic_data = env.test.get('traffic_data')
    ixia_linux_host_ip = get_value_of_key(traffic_data, 'ixia_linux_host_ip')
    ixia_host_ip = get_value_of_key(traffic_data, 'ixia_host_ip')
    spirent_linux_host_ip = get_value_of_key(traffic_data, 'spirent_linux_host_ip')
    ixia_linux_username = get_value_of_key(traffic_data, 'ixia_linux_username')
    ixia_linux_password = get_value_of_key(traffic_data, 'ixia_linux_password')
    spirent_linux_username = get_value_of_key(traffic_data, 'spirent_linux_username')
    spirent_linux_password = get_value_of_key(traffic_data, 'spirent_linux_password')

    if env.get('orchestrator', 'openstack') == 'openstack':
        with settings(host_string = env.roledefs['openstack'][0]), hide('everything'):
            openstack_host_name = run("hostname")

    with settings(host_string = env.roledefs['cfgm'][0]), hide('everything'):
        cfgm_host_name = run("hostname")

    control_host_names = []
    for control_host in env.roledefs['control']:
        with settings(host_string = control_host), hide('everything'):
            host_name = run("hostname")
            control_host_names.append(host_name)

    cassandra_host_names = []
    if 'database' in env.roledefs.keys():
        for cassandra_host in env.roledefs['database']:
            with settings(host_string = cassandra_host), hide('everything'):
                host_name = run("hostname")
                cassandra_host_names.append(host_name)
    keystone_version = get_keystone_version()
    internal_vip = get_openstack_internal_vip()
    external_vip = get_openstack_external_vip()
    contrail_internal_vip = get_contrail_internal_vip()
    contrail_external_vip = get_contrail_external_vip()
    multi_role_test = False
    for host_string in env.roledefs['all']:
        if host_string in env.roledefs.get('test',[]):
            for role in env.roledefs.iterkeys():
                if role in ['test','all']:
                    continue
                if host_string in env.roledefs.get(role,[]):
                    multi_role_test=True
                    break
            if not multi_role_test:
                continue
        host_ip = host_string.split('@')[1]
        with settings(host_string = host_string), hide('everything'):
            try:
                host_name = run("hostname")
                host_fqname = run("hostname -f")
            except:
                logger.warn('Unable to login to %s'%host_ip)
                continue
        host_dict = {}

        host_dict['ip'] = host_ip
        host_dict['data-ip']= get_data_ip(host_string)[0]
        if host_dict['data-ip'] == host_string.split('@')[1]:
            host_dict['data-ip'] = get_data_ip(host_string)[0]
        host_dict['control-ip']= get_control_host_string(host_string).split('@')[1]

        host_dict['name'] = host_name
        host_dict['fqname'] = host_fqname
        host_dict['username'] = host_string.split('@')[0]
        host_dict['password'] =get_env_passwords(host_string)
        host_dict['roles'] = []

        if env.get('qos', {}):
            if host_string in env.qos.keys():
                role_dict = env.qos[host_string]
                host_dict['qos'] = role_dict
        if env.get('qos_niantic', {}):
            if host_string in env.qos_niantic.keys():
                role_dict = env.qos_niantic[host_string]
                host_dict['qos_niantic'] = role_dict

        if host_string in env.roledefs['openstack']:
            role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}}
            role_dict['container'] = get_container_name(containers, host_string, 'openstack')
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['cfgm']:
            role_dict = {'type': 'cfgm', 'params': {'collector': host_name, 'cassandra': ' '.join(cassandra_host_names)}}
            role_dict['container'] = get_container_name(containers, host_string, 'controller')
            if env.get('orchestrator', 'openstack') == 'openstack':
                role_dict['openstack'] = openstack_host_name
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['control']:
            role_dict = {'type': 'bgp', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            role_dict['container'] = get_container_name(containers, host_string, 'controller')
            host_dict['roles'].append(role_dict)

        if 'database' in env.roledefs.keys() and host_string in env.roledefs['database']:
            role_dict = { 'type': 'database', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            role_dict['container'] = get_container_name(containers, host_string, 'analyticsdb')
            host_dict['roles'].append(role_dict)

        if not env.roledefs.get('compute'):
            env.roledefs['compute'] = []
        if host_string in env.roledefs['compute']:
            role_dict = {'type': 'compute', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            role_dict['container'] = get_container_name(containers, host_string, 'agent')
            role_dict['params']['bgp'] = []
            if len(env.roledefs['control']) == 1:
                role_dict['params']['bgp'] = control_host_names
            else:
                for control_node in control_host_names:
                    role_dict['params']['bgp'].append(control_node)
               # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))])
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs.get('lb',[]):
            role_dict = {'type': 'lb', 'params': {'lb': host_name}}
            role_dict['container'] = get_container_name(containers, host_string, 'lb')
            host_dict['roles'].append(role_dict)

        if 'collector' in env.roledefs.keys() and host_string in env.roledefs['collector']:
            role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            role_dict['container'] = get_container_name(containers, host_string, 'analytics')
            host_dict['roles'].append(role_dict)

        if 'webui' in env.roledefs.keys() and host_string in env.roledefs['webui']:
            role_dict = { 'type': 'webui', 'params': {'cfgm': cfgm_host_name} }
            role_dict['container'] = get_container_name(containers, host_string, 'controller')
            host_dict['roles'].append(role_dict)

        # Kube managers
        if 'contrail-kubernetes' in env.roledefs.keys() and \
                host_string in env.roledefs['contrail-kubernetes']:
            role_dict = { 'type': 'contrail-kubernetes', 'params': {} }
            role_dict['container'] = get_container_name(containers,
                host_string, 'contrail-kube-manager')
            host_dict['roles'].append(role_dict)

        sanity_testbed_dict['hosts'].append(host_dict)
    if env.has_key('vgw'): sanity_testbed_dict['vgw'].append(env.vgw)

    #get sriov info
    if env.has_key('sriov'):
        sanity_testbed_dict['sriov'].append(env.sriov)

    #get dpdk info
    if env.has_key('dpdk'):
        sanity_testbed_dict['dpdk'].append(env.dpdk)

    #get k8s info
    sanity_testbed_dict['kubernetes'] = env.get('kubernetes', {})

   #get ns_agilio_vrouter info
    if env.has_key('ns_agilio_vrouter'):
        sanity_testbed_dict['ns_agilio_vrouter'].append(env.ns_agilio_vrouter)

    # Read ToR config
    sanity_tor_dict = {}
    if env.has_key('tor_agent'):
        sanity_testbed_dict['tor_agent'] = env.tor_agent

    # Read any tor-host config
    if env.has_key('tor_hosts'):
        sanity_testbed_dict['tor_hosts'] = env.tor_hosts

    if env.has_key('xmpp_auth_enable'):
        sanity_testbed_dict['xmpp_auth_enable'] = env.xmpp_auth_enable
    if env.has_key('xmpp_dns_auth_enable'):
        sanity_testbed_dict['xmpp_dns_auth_enable'] = env.xmpp_dns_auth_enable
    if env.has_key('metadata_ssl_enable'):
        sanity_testbed_dict['metadata_ssl_enable'] = env.metadata_ssl_enable

    if env.has_key('dm_mx'):
        sanity_testbed_dict['dm_mx'] = env.dm_mx

    # Read any MX config (as physical_router )
    if env.has_key('physical_routers'):
        sanity_testbed_dict['physical_routers'] = env.physical_routers

    esxi_hosts = getattr(testbed, 'esxi_hosts', None)
    if esxi_hosts:
        for esxi in esxi_hosts:
            host_dict = {}
            host_dict['ip'] = esxi_hosts[esxi]['ip']
            host_dict['data-ip'] = host_dict['ip']
            host_dict['control-ip'] = host_dict['ip']
            host_dict['name'] = esxi
            host_dict['username'] = esxi_hosts[esxi]['username']
            host_dict['password'] = esxi_hosts[esxi]['password']
            #Its used for vcenter only mode provosioning for contrail-vm
            #Its not needed for vcenter_gateway mode, hence might not be there in testbed.py
            if 'contrail_vm' in esxi_hosts[esxi]:
                host_dict['contrail_vm'] = esxi_hosts[esxi]['contrail_vm']['host']
            host_dict['roles'] = []
            host_dict['type'] = 'esxi'
            sanity_testbed_dict['hosts'].append(host_dict)
            sanity_testbed_dict['esxi_vms'].append(host_dict)

    vcenter_servers = env.get('vcenter_servers')
    if vcenter_servers:
        for vcenter in vcenter_servers:
            sanity_testbed_dict['vcenter_servers'].append(vcenter_servers[vcenter])

    orch = getattr(env, 'orchestrator', 'openstack')
    deployer = getattr(env, 'deployer', 'openstack')
    #get other orchestrators (vcenter etc) info if any
    slave_orch = None
    if env.has_key('other_orchestrators'):
        sanity_testbed_dict['other_orchestrators'] = env.other_orchestrators
        for k,v in env.other_orchestrators.items():
            if v['type'] == 'vcenter':
                slave_orch = 'vcenter'

    # get host ipmi list
    if env.has_key('hosts_ipmi'):
        sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi)

    # Setting slave orch to k8s when key present
    if env.has_key('kubernetes'):
        if  sanity_testbed_dict['kubernetes']['mode'] == 'nested':
            slave_orch = 'kubernetes'

    # generate json file and copy to cfgm
    sanity_testbed_json = json.dumps(sanity_testbed_dict)
    stack_user = os.getenv('STACK_USER', env.get('stack_user', env.test.get('stack_user', '')))
    stack_password = os.getenv('STACK_PASSWORD',
            env.test.get('stack_password',''))
    stack_tenant = os.getenv('STACK_TENANT', env.get('stack_tenant',
            env.test.get('stack_tenant', '')))
    stack_domain = os.getenv('STACK_DOMAIN',
            env.get('stack_domain', env.test.get('stack_domain', '')))
    use_project_scoped_token = env.test.get('use_project_scoped_token', '')
    if not env.has_key('domain_isolation'):
        env.domain_isolation = False
    if not env.has_key('cloud_admin_domain'):
        env.cloud_admin_domain = 'Default'
    if not env.has_key('cloud_admin_user'):
        env.cloud_admin_user = '******'
    if not env.has_key('cloud_admin_password'):
        env.cloud_admin_password = env.get('openstack_admin_password')
    domain_isolation = os.getenv('DOMAIN_ISOLATION',
            env.test.get('domain_isolation', env.domain_isolation))
    cloud_admin_domain = os.getenv('CLOUD_ADMIN_DOMAIN',
            env.test.get('cloud_admin_domain', env.cloud_admin_domain))
    cloud_admin_user = os.getenv('CLOUD_ADMIN_USER',
            env.test.get('cloud_admin_user', env.cloud_admin_user))
    cloud_admin_password = os.getenv('CLOUD_ADMIN_PASSWORD',
            env.test.get('cloud_admin_password', env.cloud_admin_password))
    tenant_isolation = os.getenv('TENANT_ISOLATION',
            env.test.get('tenant_isolation', ''))

    stop_on_fail = env.get('stop_on_fail', False)
    mail_to = os.getenv('MAIL_TO', env.test.get('mail_to', ''))
    log_scenario = env.get('log_scenario', 'Sanity')
    stack_region_name = get_region_name()
    admin_user, admin_password = get_authserver_credentials()
    if orch == 'kubernetes':
        admin_tenant = 'default'
    else:
        admin_tenant = get_admin_tenant_name()
    # Few hardcoded variables for sanity environment
    # can be removed once we move to python3 and configparser

    webserver_host = os.getenv('WEBSERVER_HOST',
            env.test.get('webserver_host',''))
    webserver_user = os.getenv('WEBSERVER_USER',
            env.test.get('webserver_user', ''))
    webserver_password = os.getenv('WEBSERVER_PASSWORD',
            env.test.get('webserver_password', ''))
    webserver_log_path = os.getenv('WEBSERVER_LOG_PATH',
            env.test.get('webserver_log_path', '/var/www/contrail-test-ci/logs/'))
    webserver_report_path = os.getenv('WEBSERVER_REPORT_PATH',
            env.test.get('webserver_report_path', '/var/www/contrail-test-ci/reports/'))
    webroot = os.getenv('WEBROOT', env.test.get('webroot', 'contrail-test-ci'))
    mail_server = os.getenv('MAIL_SERVER', env.test.get('mail_server', ''))
    mail_port = os.getenv('MAIL_PORT', env.test.get('mail_port', '25'))
    fip_pool_name = os.getenv('FIP_POOL_NAME',
            env.test.get('fip_pool_name', 'floating-ip-pool'))
    public_virtual_network = os.getenv('PUBLIC_VIRTUAL_NETWORK',
            env.test.get('public_virtual_network', 'public'))
    public_tenant_name = os.getenv('PUBLIC_TENANT_NAME',
            env.test.get('public_tenant_name', 'admin'))
    fixture_cleanup = os.getenv('FIXTURE_CLEANUP',
            env.test.get('fixture_cleanup', 'yes'))
    generate_html_report = os.getenv('GENERATE_HTML_REPORT',
            env.test.get('generate_html_report', 'True'))
    keypair_name = os.getenv('KEYPAIR_NAME',
            env.test.get('keypair_name', 'contrail_key'))
    mail_sender = os.getenv('MAIL_SENDER', env.test.get('mail_sender', '*****@*****.**'))
    discovery_ip = os.getenv('DISCOVERY_IP', env.test.get('discovery_ip', ''))
    config_api_ip = os.getenv('CONFIG_API_IP', env.test.get('config_api_ip', ''))
    analytics_api_ip = os.getenv('ANALYTICS_API_IP',
            env.test.get('analytics_api_ip', ''))
    discovery_port = os.getenv('DISCOVERY_PORT',
            env.test.get('discovery_port', ''))
    config_api_port = os.getenv('CONFIG_API_PORT',
            env.test.get('config_api_port', ''))
    analytics_api_port = os.getenv('ANALYTICS_API_PORT',
            env.test.get('analytics_api_port', ''))
    control_port = os.getenv('CONTROL_PORT', env.test.get('control_port', ''))
    dns_port = os.getenv('DNS_PORT', env.test.get('dns_port', ''))
    agent_port = os.getenv('AGENT_PORT', env.test.get('agent_port', ''))
    user_isolation = os.getenv('USER_ISOLATION',
            env.test.get('user_isolation', False if stack_user else True))
    neutron_username = os.getenv('NEUTRON_USERNAME',
            env.test.get('neutron_username', None))
    availability_zone = os.getenv('AVAILABILITY_ZONE',
            env.test.get('availability_zone', None))
    ci_flavor = os.getenv('CI_FLAVOR', env.test.get('ci_flavor', None))
    kube_config_file = env.test.get('kube_config_file',
                                     '/etc/kubernetes/admin.conf')
    openshift_src_config_file = env.test.get('openshift_src_config_file',
                                     '/root/.kube/config')
    use_devicemanager_for_md5 = getattr(testbed, 'use_devicemanager_for_md5', False)
    router_asn = getattr(testbed, 'router_asn', '')
    public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '')
    public_vn_subnet = getattr(testbed, 'public_vn_subnet', '')
    ext_routers = getattr(testbed, 'ext_routers', '')
    router_info = str(ext_routers)
    fabric_gw = getattr(testbed, 'fabric_gw', '')
    fabric_gw_info = str(fabric_gw)
    test_verify_on_setup = getattr(env, 'test_verify_on_setup', True)
    webui = getattr(testbed, 'webui', False)
    horizon = getattr(testbed, 'horizon', False)
    ui_config = getattr(testbed, 'ui_config', False)
    ui_browser = getattr(testbed, 'ui_browser', False)

    if not env.has_key('openstack'):
        env.openstack = {}
    if not env.has_key('cfgm'):
        env.cfgm = {}

    config_amqp_ip = env.openstack.get('amqp_host', '')
    if config_amqp_ip:
        config_amqp_ips = [config_amqp_ip]
    else:
        config_amqp_ips = []

    # If amqp details are in env.cfgm as well, use that
    config_amqp_port = env.cfgm.get('amqp_port', '5672')
    config_amqp_ips = env.cfgm.get('amqp_hosts', config_amqp_ips)

    key_filename = env.get('key_filename', '')
    pubkey_filename = env.get('pubkey_filename', '')

    vcenter_dc = ''
    if orch == 'vcenter' or slave_orch== 'vcenter':
        public_tenant_name='vCenter'

    if env.has_key('vcenter_servers'):
            if env.vcenter_servers:
                for vc in env.vcenter_servers:
                    for dc in env.vcenter_servers[vc]['datacenters']:
                        vcenter_dc = dc

    #global controller
    gc_host_mgmt = getattr(testbed, 'gc_host_mgmt', '')
    gc_host_control_data =  getattr(testbed, 'gc_host_control_data', '')
    gc_user_name = getattr(testbed, 'gc_user_name', '')
    gc_user_pwd = getattr(testbed, 'gc_user_pwd', '')
    keystone_password = getattr(testbed, 'keystone_password', '')

    sanity_params = sanity_ini_templ.safe_substitute(
        {'__testbed_json_file__'   : 'sanity_testbed.json',
         '__keystone_version__'    : keystone_version,
         '__use_project_scoped_token__': use_project_scoped_token,
         '__nova_keypair_name__'   : keypair_name,
         '__orch__'                : orch,
         '__deployer__'            : deployer,
         '__admin_user__'          : admin_user,
         '__admin_password__'      : admin_password,
         '__admin_tenant__'        : admin_tenant,
         '__domain_isolation__'    : domain_isolation,
         '__cloud_admin_domain__'  : cloud_admin_domain,
         '__cloud_admin_user__'    : cloud_admin_user,
         '__cloud_admin_password__': cloud_admin_password,
         '__tenant_isolation__'    : tenant_isolation,
         '__stack_user__'          : stack_user,
         '__stack_password__'      : stack_password,
         '__auth_ip__'             : auth_server_ip,
         '__auth_port__'           : auth_server_port,
         '__auth_protocol__'       : auth_protocol,
         '__stack_region_name__'   : stack_region_name,
         '__stack_tenant__'        : stack_tenant,
         '__stack_domain__'        : stack_domain,
         '__multi_tenancy__'       : get_mt_enable(),
         '__address_family__'      : get_address_family(),
         '__log_scenario__'        : log_scenario,
         '__generate_html_report__': generate_html_report,
         '__fixture_cleanup__'     : fixture_cleanup,
         '__key_filename__'        : key_filename,
         '__pubkey_filename__'     : pubkey_filename,
         '__webserver__'           : webserver_host,
         '__webserver_user__'      : webserver_user,
         '__webserver_password__'  : webserver_password,
         '__webserver_log_dir__'   : webserver_log_path,
         '__webserver_report_dir__': webserver_report_path,
         '__webroot__'             : webroot,
         '__mail_server__'         : mail_server,
         '__mail_port__'           : mail_port,
         '__sender_mail_id__'      : mail_sender,
         '__receiver_mail_id__'    : mail_to,
         '__http_proxy__'          : env.get('http_proxy', ''),
         '__ui_browser__'          : ui_browser,
         '__ui_config__'           : ui_config,
         '__horizon__'             : horizon,
         '__webui__'               : webui,
         '__devstack__'            : False,
         '__public_vn_rtgt__'      : public_vn_rtgt,
         '__router_asn__'          : router_asn,
         '__router_name_ip_tuples__': router_info,
         '__fabric_gw_name_ip_tuple__': fabric_gw_info,
         '__public_vn_name__'      : fip_pool_name,
         '__public_virtual_network__':public_virtual_network,
         '__public_tenant_name__'  :public_tenant_name,
         '__public_vn_subnet__'    : public_vn_subnet,
         '__test_revision__'       : revision,
         '__fab_revision__'        : fab_revision,
         '__test_verify_on_setup__': test_verify_on_setup,
         '__stop_on_fail__'        : stop_on_fail,
         '__ha_setup__'            : getattr(testbed, 'ha_setup', ''),
         '__ipmi_username__'       : getattr(testbed, 'ipmi_username', ''),
         '__ipmi_password__'       : getattr(testbed, 'ipmi_password', ''),
         '__contrail_internal_vip__' : contrail_internal_vip,
         '__contrail_external_vip__' : contrail_external_vip,
         '__internal_vip__'        : internal_vip,
         '__external_vip__'        : external_vip,
         '__vcenter_dc__'          : vcenter_dc,
         '__vcenter_server__'      : get_vcenter_ip(),
         '__vcenter_port__'        : get_vcenter_port(),
         '__vcenter_username__'    : get_vcenter_username(),
         '__vcenter_password__'    : get_vcenter_password(),
         '__vcenter_datacenter__'  : get_vcenter_datacenter(),
         '__vcenter_compute__'     : get_vcenter_compute(),
         '__use_devicemanager_for_md5__'       : use_devicemanager_for_md5,
         '__discovery_port__'      : discovery_port,
         '__config_api_port__'     : config_api_port,
         '__analytics_api_port__'  : analytics_api_port,
         '__control_port__'        : control_port,
         '__dns_port__'            : dns_port,
         '__vrouter_agent_port__'  : agent_port,
         '__discovery_ip__'        : discovery_ip,
         '__config_api_ip__'       : config_api_ip,
         '__analytics_api_ip__'    : analytics_api_ip,
         '__user_isolation__'      : user_isolation,
         '__neutron_username__'    : neutron_username,
         '__availability_zone__'   : availability_zone,
         '__ci_flavor__'           : ci_flavor,
         '__config_amqp_ips__'     : ','.join(config_amqp_ips),
         '__config_amqp_port__'    : config_amqp_port,
         '__api_auth_protocol__'   : api_auth_protocol,
         '__api_certfile__'        : api_certfile,
         '__api_keyfile__'         : api_keyfile,
         '__api_cafile__'          : api_cafile,
         '__api_insecure_flag__'   : api_insecure_flag,
         '__keystone_certfile__'   : keystone_certfile,
         '__keystone_keyfile__'    : keystone_keyfile,
         '__keystone_cafile__'     : keystone_cafile,
         '__keystone_insecure_flag__': keystone_insecure_flag,
         '__gc_host_mgmt__'        : gc_host_mgmt,
         '__gc_host_control_data__': gc_host_control_data,
         '__gc_user_name__'        : gc_user_name,
         '__gc_user_pwd__'         : gc_user_pwd,
         '__keystone_password__'   : keystone_password,
         '__slave_orch__'          : slave_orch,
	 '__ixia_linux_host_ip__'  : ixia_linux_host_ip,
	 '__ixia_host_ip__'        : ixia_host_ip,
	 '__spirent_linux_host_ip__': spirent_linux_host_ip,
	 '__ixia_linux_username__' : ixia_linux_username,
	 '__ixia_linux_password__' : ixia_linux_password,
	 '__spirent_linux_username__': spirent_linux_username,
	 '__spirent_linux_password__': spirent_linux_password,

        })

    ini_file = test_dir + '/' + 'sanity_params.ini'
    testbed_json_file = test_dir + '/' + 'sanity_testbed.json'
    with open(ini_file, 'w') as ini:
        ini.write(sanity_params)

    with open(testbed_json_file,'w') as tb:
        tb.write(sanity_testbed_json)

    # Create /etc/contrail/openstackrc
    if not os.path.exists('/etc/contrail'):
        os.makedirs('/etc/contrail')

    keycertbundle = None
    if keystone_cafile and keystone_keyfile and keystone_certfile:
        bundle = '/tmp/keystonecertbundle.pem'
        certs = [keystone_certfile, keystone_keyfile, keystone_cafile]
        keycertbundle = utils.getCertKeyCaBundle(bundle, certs)

    with open('/etc/contrail/openstackrc','w') as rc:
        rc.write("export OS_USERNAME=%s\n" % admin_user)
        rc.write("export OS_PASSWORD=%s\n" % admin_password)
        rc.write("export OS_TENANT_NAME=%s\n" % admin_tenant)
        rc.write("export OS_REGION_NAME=%s\n" % stack_region_name)
        rc.write("export OS_AUTH_URL=%s://%s:%s/v2.0\n" % (auth_protocol,
                                                           auth_server_ip,
                                                           auth_server_port))
        rc.write("export OS_CACERT=%s\n" % keycertbundle)
        rc.write("export OS_CERT=%s\n" % keystone_certfile)
        rc.write("export OS_KEY=%s\n" % keystone_keyfile)
        rc.write("export OS_INSECURE=%s\n" % keystone_insecure_flag)
        rc.write("export OS_NO_CACHE=1\n")

    # Write vnc_api_lib.ini - this is required for vnc_api to connect to keystone
    config = ConfigParser.ConfigParser()
    config.optionxform = str
    vnc_api_ini = '/etc/contrail/vnc_api_lib.ini'
    if os.path.exists(vnc_api_ini):
        config.read(vnc_api_ini)

    if 'auth' not in config.sections():
        config.add_section('auth')

    config.set('auth','AUTHN_TYPE', 'keystone')
    config.set('auth','AUTHN_PROTOCOL', auth_protocol)
    config.set('auth','AUTHN_SERVER', auth_server_ip)
    config.set('auth','AUTHN_PORT', auth_server_port)
    if keystone_version == 'v3':
        config.set('auth','AUTHN_URL', '/v3/auth/tokens')
    else:
        config.set('auth','AUTHN_URL', '/v2.0/tokens')

    if api_auth_protocol == 'https':
        if 'global' not in config.sections():
            config.add_section('global')
        config.set('global','certfile', api_certfile)
        config.set('global','cafile', api_cafile)
        config.set('global','keyfile', api_keyfile)
        config.set('global','insecure',api_insecure_flag)

    if auth_protocol == 'https':
        if 'auth' not in config.sections():
            config.add_section('auth')
        config.set('auth','certfile', keystone_certfile)
        config.set('auth','cafile', keystone_cafile)
        config.set('auth','keyfile', keystone_keyfile)
        config.set('auth','insecure', keystone_insecure_flag)

    with open(vnc_api_ini,'w') as f:
        config.write(f)

    # Get kube config file to the testrunner node
    if orch == 'kubernetes' or slave_orch == 'kubernetes':
        if not os.path.exists(kube_config_file):
            dir_name = os.path.dirname(kube_config_file)
            if not os.path.exists(dir_name):
                os.makedirs(dir_name)
            with settings(host_string = env.kubernetes['master']):
                if deployer == 'openshift' :
                    get(openshift_src_config_file, kube_config_file)
                else:
                    get(kube_config_file, kube_config_file)


    # If webui = True, in testbed, setup webui for sanity
    if webui:
        sku = get_build_sku(cfgm_host)
        update_config_option('openstack', '/etc/keystone/keystone.conf',
                             'token', 'expiration',
                             '86400','keystone', sku)
        update_js_config('webui', '/etc/contrail/config.global.js',
                         'contrail-webui', container=is_container_env)
Esempio n. 36
0
def get_openstack_pkgs():
    """Return the openstack packages to install on a node.

    The HA add-on package is included only for multi-node openstack
    clusters that also have an internal VIP configured.
    """
    need_ha = len(env.roledefs['openstack']) > 1 and get_openstack_internal_vip()
    if need_ha:
        return ['contrail-openstack', 'contrail-openstack-ha']
    return ['contrail-openstack']
Esempio n. 37
0
def get_openstack_pkgs():
    """Return the list of contrail openstack packages for this cluster."""
    pkgs = ["contrail-openstack"]
    multi_node = len(env.roledefs["openstack"]) > 1
    # Only query the VIP when there is more than one openstack node
    # (preserves the original short-circuit order).
    if multi_node and get_openstack_internal_vip():
        pkgs += ["contrail-openstack-ha"]
    return pkgs
Esempio n. 38
0
def configure_test_env(contrail_fab_path='/opt/contrail/utils', test_dir='/contrail-test'):
    """
    Configure test environment by creating sanity_params.ini and sanity_testbed.json files

    Reads the testbed definition from the contrail fab checkout at
    *contrail_fab_path*, gathers per-host facts over ssh (hostnames,
    credentials, data/control IPs), and writes two files into *test_dir*:

      - sanity_testbed.json : host/role topology consumed by the tests
      - sanity_params.ini   : sanity_params.ini.sample with placeholders
                              substituted

    NOTE(review): the webserver/mail values below are hardcoded for a
    specific sanity lab environment (see the inline comment there).
    """
    # Make the fab utils importable before pulling in the testbed helpers.
    sys.path.insert(0, contrail_fab_path)
    from fabfile.testbeds import testbed
    from fabfile.utils.host import get_openstack_internal_vip,\
        get_control_host_string, get_authserver_ip, get_admin_tenant_name, \
        get_authserver_port, get_env_passwords, get_authserver_credentials, \
        get_vcenter_ip, get_vcenter_port, get_vcenter_username, \
        get_vcenter_password, get_vcenter_datacenter, get_vcenter_compute
    from fabfile.utils.multitenancy import get_mt_enable
    from fabfile.utils.interface import get_data_ip

    cfgm_host = env.roledefs['cfgm'][0]

    # Record the revisions of the fab repo and the test repo: use git HEAD
    # when the directory is a git checkout, otherwise fall back to the
    # packaged VERSION file on the cfgm node.
    with settings(warn_only=True):
        with lcd(contrail_fab_path):
            if local('git branch').succeeded:
                fab_revision = local('git log --format="%H" -n 1', capture=True)
            else:
                with settings(host_string=cfgm_host):
                   fab_revision = run('cat /opt/contrail/contrail_packages/VERSION')
        with lcd(test_dir):
            if local('git branch').succeeded:
                revision = local('git log --format="%H" -n 1', capture=True)
            else:
                with settings(host_string=cfgm_host):
                    revision = run('cat /opt/contrail/contrail_packages/VERSION')

    # Skeleton of the topology that will be serialized to sanity_testbed.json.
    sanity_testbed_dict = {
        'hosts': [],
        'vgw': [],
        'esxi_vms':[],
        'hosts_ipmi': [],
        'tor':[],
    }

    # Load the ini template shipped with the test repo; placeholders are
    # filled via string.Template.safe_substitute below.
    sample_ini_file = test_dir + '/' + 'sanity_params.ini.sample'
    with open(sample_ini_file, 'r') as fd_sample_ini:
       contents_sample_ini = fd_sample_ini.read()
    sanity_ini_templ = string.Template(contents_sample_ini)

    # Resolve the hostnames of the key roles over ssh.
    with settings(host_string = env.roledefs['openstack'][0]):
        openstack_host_name = run("hostname")

    with settings(host_string = env.roledefs['cfgm'][0]):
        cfgm_host_name = run("hostname")

    control_host_names = []
    for control_host in env.roledefs['control']:
        with settings(host_string = control_host):
            host_name = run("hostname")
            control_host_names.append(host_name)

    # The 'database' role is optional in the testbed definition.
    cassandra_host_names = []
    if 'database' in env.roledefs.keys():
        for cassandra_host in env.roledefs['database']:
            with settings(host_string = cassandra_host):
                host_name = run("hostname")
                cassandra_host_names.append(host_name)

    # Build one host entry (ip, credentials, role list) per node, skipping
    # nodes that serve only as dedicated test runners.
    internal_vip = get_openstack_internal_vip()
    for host_string in env.roledefs['all']:
        if host_string in env.roledefs.get('test',[]):
            continue
        # host_string is of the form user@ip.
        host_ip = host_string.split('@')[1]
        with settings(host_string = host_string):
            host_name = run("hostname")

        host_dict = {}

        host_dict['ip'] = host_ip
        host_dict['data-ip']= get_data_ip(host_string)[0]
        # NOTE(review): this re-fetch assigns the same value again when
        # data-ip equals the mgmt ip — looks redundant; confirm intent.
        if host_dict['data-ip'] == host_string.split('@')[1]:
            host_dict['data-ip'] = get_data_ip(host_string)[0]
        host_dict['control-ip']= get_control_host_string(host_string).split('@')[1]

        host_dict['name'] = host_name
        host_dict['username'] = host_string.split('@')[0]
        host_dict['password'] =get_env_passwords(host_string)
        host_dict['roles'] = []

        # With an internal VIP, the openstack role is carried by the
        # synthetic 'contrail-vip' host appended further below, not by the
        # real openstack nodes.
        if not internal_vip:
            if host_string in env.roledefs['openstack']:
                role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}}
                host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['cfgm']:
            role_dict = {'type': 'cfgm', 'params': {'collector': host_name, 'cassandra': ' '.join(cassandra_host_names)}}

            if internal_vip:
                role_dict['openstack'] = 'contrail-vip'
            else:
                role_dict['openstack'] = openstack_host_name

            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['control']:
            role_dict = {'type': 'bgp', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)

        if 'database' in env.roledefs.keys() and host_string in env.roledefs['database']:
            role_dict = { 'type': 'database', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['compute']:
            role_dict = {'type': 'compute', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            role_dict['params']['bgp'] = []
            # Every compute peers with all control nodes (both branches
            # produce the full control_host_names list).
            if len(env.roledefs['control']) == 1:
                role_dict['params']['bgp'] = control_host_names
            else:
                for control_node in control_host_names:
                    role_dict['params']['bgp'].append(control_node)
               # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))])
            host_dict['roles'].append(role_dict)

        if 'collector' in env.roledefs.keys() and host_string in env.roledefs['collector']:
            role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            host_dict['roles'].append(role_dict)

        if 'webui' in env.roledefs.keys() and host_string in env.roledefs['webui']:
            role_dict = { 'type': 'webui', 'params': {'cfgm': cfgm_host_name} }
            host_dict['roles'].append(role_dict)

        sanity_testbed_dict['hosts'].append(host_dict)
    # Optional extras from the testbed definition.
    if env.has_key('vgw'): sanity_testbed_dict['vgw'].append(env.vgw)

    # Read ToR config
    sanity_tor_dict = {}
    if env.has_key('tor_agent'):
        sanity_testbed_dict['tor_agent'] = env.tor_agent

    # Read any tor-host config
    if env.has_key('tor_hosts'):
        sanity_testbed_dict['tor_hosts'] = env.tor_hosts

    # Read any MX config (as physical_router )
    if env.has_key('physical_routers'):
        sanity_testbed_dict['physical_routers'] = env.physical_routers

    # ESXi hypervisors appear both as plain hosts and as esxi_vms entries.
    esxi_hosts = getattr(testbed, 'esxi_hosts', None)
    if esxi_hosts:
        for esxi in esxi_hosts:
            host_dict = {}
            host_dict['ip'] = esxi_hosts[esxi]['ip']
            host_dict['data-ip'] = host_dict['ip']
            host_dict['control-ip'] = host_dict['ip']
            host_dict['name'] = esxi
            host_dict['username'] = esxi_hosts[esxi]['username']
            host_dict['password'] = esxi_hosts[esxi]['password']
            host_dict['contrail_vm'] = esxi_hosts[esxi]['contrail_vm']['host']
            host_dict['roles'] = []
            sanity_testbed_dict['hosts'].append(host_dict)
            sanity_testbed_dict['esxi_vms'].append(host_dict)
    # Adding vip VIP dict for HA test setup

    with settings(host_string = env.roledefs['openstack'][0]):
        if internal_vip:
            # Synthetic host 'contrail-vip' fronting the HA openstack
            # cluster; credentials are borrowed from the cfgm node.
            host_dict = {}
            host_dict['data-ip']= get_authserver_ip()
            host_dict['control-ip']= get_authserver_ip()
            host_dict['ip']= get_authserver_ip()
            host_dict['name'] = 'contrail-vip'
            with settings(host_string = env.roledefs['cfgm'][0]):
                host_dict['username'] = host_string.split('@')[0]
                host_dict['password'] = get_env_passwords(host_string)
            host_dict['roles'] = []
            role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)
            sanity_testbed_dict['hosts'].append(host_dict)

    # get host ipmi list
    if env.has_key('hosts_ipmi'):
        sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi)

    # generate json file and copy to cfgm
    sanity_testbed_json = json.dumps(sanity_testbed_dict)

    stop_on_fail = env.get('stop_on_fail', False)
    mail_to = env.get('mail_to', '')
    log_scenario = env.get('log_scenario', 'Sanity')
    stack_user, stack_password = get_authserver_credentials()
    stack_tenant = get_admin_tenant_name()
    # Few hardcoded variables for sanity environment
    # can be removed once we move to python3 and configparser
    stack_domain = 'default-domain'
    webserver_host = '10.204.216.50'
    webserver_user = '******'
    webserver_password = '******'
    webserver_log_path = '/home/bhushana/Documents/technical/logs/'
    webserver_report_path = '/home/bhushana/Documents/technical/sanity'
    webroot = 'Docs/logs'
    mail_server = '10.204.216.49'
    mail_port = '25'
    fip_pool_name = 'floating-ip-pool'
    public_virtual_network='public'
    public_tenant_name='admin'
    fixture_cleanup = 'yes'
    generate_html_report = 'True'
    key = 'key1'
    mailSender = '*****@*****.**'

    # Overridable knobs from the testbed / env definitions.
    use_devicemanager_for_md5 = getattr(testbed, 'use_devicemanager_for_md5', False)
    orch = getattr(env, 'orchestrator', 'openstack')
    router_asn = getattr(testbed, 'router_asn', '')
    public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '')
    public_vn_subnet = getattr(testbed, 'public_vn_subnet', '')
    ext_routers = getattr(testbed, 'ext_routers', '')
    router_info = str(ext_routers)
    test_verify_on_setup = getattr(env, 'test_verify_on_setup', True)
    webui = getattr(testbed, 'webui', False)
    horizon = getattr(testbed, 'horizon', False)
    ui_config = getattr(testbed, 'ui_config', False)
    ui_browser = getattr(testbed, 'ui_browser', False)
    if 'mail_server' in env.keys():
        mail_server = env.mail_server
        mail_port = env.mail_port

    # vCenter deployments use a dedicated tenant and expose the datacenter
    # name to the ini template.
    vcenter_dc = ''
    if orch == 'vcenter':
        public_tenant_name='vCenter'

    if env.has_key('vcenter'):
        if env.vcenter:
            vcenter_dc = env.vcenter['datacenter']

    # Fill in the ini template; safe_substitute leaves any placeholder not
    # listed here untouched instead of raising.
    sanity_params = sanity_ini_templ.safe_substitute(
        {'__testbed_json_file__'   : 'sanity_testbed.json',
         '__nova_keypair_name__'   : key,
         '__orch__'                : orch,
         '__stack_user__'          : stack_user,
         '__stack_password__'      : stack_password,
         '__auth_ip__'             : get_authserver_ip(),
         '__auth_port__'           : get_authserver_port(),
         '__stack_tenant__'        : stack_tenant,
         '__stack_domain__'        : stack_domain,
         '__multi_tenancy__'       : get_mt_enable(),
         '__address_family__'      : get_address_family(),
         '__log_scenario__'        : log_scenario,
         '__generate_html_report__': generate_html_report,
         '__fixture_cleanup__'     : fixture_cleanup,
         '__webserver__'           : webserver_host,
         '__webserver_user__'      : webserver_user,
         '__webserver_password__'  : webserver_password,
         '__webserver_log_dir__'   : webserver_log_path,
         '__webserver_report_dir__': webserver_report_path,
         '__webroot__'             : webroot,
         '__mail_server__'         : mail_server,
         '__mail_port__'           : mail_port,
         '__sender_mail_id__'      : mailSender,
         '__receiver_mail_id__'    : mail_to,
         '__http_proxy__'          : env.get('http_proxy', ''),
         '__ui_browser__'          : ui_browser,
         '__ui_config__'           : ui_config,
         '__horizon__'             : horizon,
         '__webui__'               : webui,
         '__devstack__'            : False,
         '__public_vn_rtgt__'      : public_vn_rtgt,
         '__router_asn__'          : router_asn,
         '__router_name_ip_tuples__': router_info,
         '__public_vn_name__'      : fip_pool_name,
         '__public_virtual_network__':public_virtual_network,
         '__public_tenant_name__'  :public_tenant_name,
         '__public_vn_subnet__'    : public_vn_subnet,
         '__test_revision__'       : revision,
         '__fab_revision__'        : fab_revision,
         '__test_verify_on_setup__': test_verify_on_setup,
         '__stop_on_fail__'        : stop_on_fail,
         '__ha_setup__'            : getattr(testbed, 'ha_setup', ''),
         '__ipmi_username__'       : getattr(testbed, 'ipmi_username', ''),
         '__ipmi_password__'       : getattr(testbed, 'ipmi_password', ''),
         '__vcenter_dc__'          : vcenter_dc,
         '__vcenter_server__'      : get_vcenter_ip(),
         '__vcenter_port__'        : get_vcenter_port(),
         '__vcenter_username__'    : get_vcenter_username(),
         '__vcenter_password__'    : get_vcenter_password(),
         '__vcenter_datacenter__'  : get_vcenter_datacenter(),
         '__vcenter_compute__'     : get_vcenter_compute(),
         '__use_devicemanager_for_md5__'       : use_devicemanager_for_md5,
        })

    # Write both generated files into the test directory.
    ini_file = test_dir + '/' + 'sanity_params.ini'
    testbed_json_file = test_dir + '/' + 'sanity_testbed.json'
    with open(ini_file, 'w') as ini:
        ini.write(sanity_params)

    with open(testbed_json_file,'w') as tb:
        tb.write(sanity_testbed_json)
Esempio n. 39
0
def setup_keepalived():
    """Task to provision VIP for openstack/cfgm nodes with keepalived"""
    openstack_vip = get_openstack_internal_vip()
    contrail_vip = get_contrail_internal_vip()
    if openstack_vip:
        execute('setup_openstack_keepalived')
    # A distinct contrail VIP gets its own keepalived provisioning pass.
    if contrail_vip != openstack_vip:
        execute('setup_contrail_keepalived')
Esempio n. 40
0
def configure_test_env(contrail_fab_path='/opt/contrail/utils', test_dir='/contrail-test'):
    """
    Configure test environment by creating sanity_params.ini and sanity_testbed.json files
    """
    print "Configuring test environment"
    sys.path.insert(0, contrail_fab_path)
    from fabfile.testbeds import testbed
    from fabfile.utils.host import get_openstack_internal_vip, \
        get_control_host_string, get_authserver_ip, get_admin_tenant_name, \
        get_authserver_port, get_env_passwords, get_authserver_credentials, \
        get_vcenter_ip, get_vcenter_port, get_vcenter_username, \
        get_vcenter_password, get_vcenter_datacenter, get_vcenter_compute, \
        get_authserver_protocol, get_region_name, get_contrail_internal_vip, \
        get_openstack_external_vip, get_contrail_external_vip
    from fabfile.utils.multitenancy import get_mt_enable
    from fabfile.utils.interface import get_data_ip
    from fabfile.tasks.install import update_config_option, update_js_config

    cfgm_host = env.roledefs['cfgm'][0]

    auth_protocol = get_authserver_protocol()
    auth_server_ip = get_authserver_ip()
    auth_server_port = get_authserver_port()
    with settings(warn_only=True), hide('everything'):
        with lcd(contrail_fab_path):
            if local('git branch').succeeded:
                fab_revision = local('git log --format="%H" -n 1', capture=True)
            else:
                with settings(host_string=cfgm_host), hide('everything'):
                   fab_revision = run('cat /opt/contrail/contrail_packages/VERSION')
        with lcd(test_dir):
            if local('git branch').succeeded:
                revision = local('git log --format="%H" -n 1', capture=True)
            else:
                with settings(host_string=cfgm_host), hide('everything'):
                    revision = run('cat /opt/contrail/contrail_packages/VERSION')

    sanity_testbed_dict = {
        'hosts': [],
        'vgw': [],
        'esxi_vms':[],
        'vcenter_servers':[],
        'hosts_ipmi': [],
        'tor':[],
    }

    sample_ini_file = test_dir + '/' + 'sanity_params.ini.sample'
    with open(sample_ini_file, 'r') as fd_sample_ini:
       contents_sample_ini = fd_sample_ini.read()
    sanity_ini_templ = string.Template(contents_sample_ini)

    if env.get('orchestrator', 'openstack') != 'vcenter':
        with settings(host_string = env.roledefs['openstack'][0]), hide('everything'):
            openstack_host_name = run("hostname")

    with settings(host_string = env.roledefs['cfgm'][0]), hide('everything'):
        cfgm_host_name = run("hostname")

    control_host_names = []
    for control_host in env.roledefs['control']:
        with settings(host_string = control_host), hide('everything'):
            host_name = run("hostname")
            control_host_names.append(host_name)

    cassandra_host_names = []
    if 'database' in env.roledefs.keys():
        for cassandra_host in env.roledefs['database']:
            with settings(host_string = cassandra_host), hide('everything'):
                host_name = run("hostname")
                cassandra_host_names.append(host_name)

    internal_vip = get_openstack_internal_vip()
    external_vip = get_openstack_external_vip()
    contrail_internal_vip = get_contrail_internal_vip()
    contrail_external_vip = get_contrail_external_vip()
    multi_role_test = False
    for host_string in env.roledefs['all']:
        if host_string in env.roledefs.get('test',[]):
            for role in env.roledefs.iterkeys():
                if role in ['test','all']:
                    continue
                if host_string in env.roledefs.get(role,[]):
                    multi_role_test=True
                    break
            if not multi_role_test:
                continue
        host_ip = host_string.split('@')[1]
        with settings(host_string = host_string), hide('everything'):
            host_name = run("hostname")

        host_dict = {}

        host_dict['ip'] = host_ip
        host_dict['data-ip']= get_data_ip(host_string)[0]
        if host_dict['data-ip'] == host_string.split('@')[1]:
            host_dict['data-ip'] = get_data_ip(host_string)[0]
        host_dict['control-ip']= get_control_host_string(host_string).split('@')[1]

        host_dict['name'] = host_name
        host_dict['username'] = host_string.split('@')[0]
        host_dict['password'] =get_env_passwords(host_string)
        host_dict['roles'] = []

        if host_string in env.roledefs['openstack']:
            role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['cfgm']:
            role_dict = {'type': 'cfgm', 'params': {'collector': host_name, 'cassandra': ' '.join(cassandra_host_names)}}

            if env.get('orchestrator', 'openstack') != 'vcenter':
                role_dict['openstack'] = openstack_host_name
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['control']:
            role_dict = {'type': 'bgp', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)

        if 'database' in env.roledefs.keys() and host_string in env.roledefs['database']:
            role_dict = { 'type': 'database', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['compute']:
            role_dict = {'type': 'compute', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            role_dict['params']['bgp'] = []
            if len(env.roledefs['control']) == 1:
                role_dict['params']['bgp'] = control_host_names
            else:
                for control_node in control_host_names:
                    role_dict['params']['bgp'].append(control_node)
               # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))])
            host_dict['roles'].append(role_dict)

        if 'collector' in env.roledefs.keys() and host_string in env.roledefs['collector']:
            role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            host_dict['roles'].append(role_dict)

        if 'webui' in env.roledefs.keys() and host_string in env.roledefs['webui']:
            role_dict = { 'type': 'webui', 'params': {'cfgm': cfgm_host_name} }
            host_dict['roles'].append(role_dict)

        sanity_testbed_dict['hosts'].append(host_dict)
    if env.has_key('vgw'): sanity_testbed_dict['vgw'].append(env.vgw)

    # Read ToR config
    sanity_tor_dict = {}
    if env.has_key('tor_agent'):
        sanity_testbed_dict['tor_agent'] = env.tor_agent

    # Read any tor-host config
    if env.has_key('tor_hosts'):
        sanity_testbed_dict['tor_hosts'] = env.tor_hosts

    # Read any MX config (as physical_router )
    if env.has_key('physical_routers'):
        sanity_testbed_dict['physical_routers'] = env.physical_routers

    esxi_hosts = getattr(testbed, 'esxi_hosts', None)
    if esxi_hosts:
        for esxi in esxi_hosts:
            host_dict = {}
            host_dict['ip'] = esxi_hosts[esxi]['ip']
            host_dict['data-ip'] = host_dict['ip']
            host_dict['control-ip'] = host_dict['ip']
            host_dict['name'] = esxi
            host_dict['username'] = esxi_hosts[esxi]['username']
            host_dict['password'] = esxi_hosts[esxi]['password']
            host_dict['contrail_vm'] = esxi_hosts[esxi]['contrail_vm']['host']
            host_dict['roles'] = []
            sanity_testbed_dict['hosts'].append(host_dict)
            sanity_testbed_dict['esxi_vms'].append(host_dict)

    vcenter_servers = env.get('vcenter_servers')
    if vcenter_servers:
        for vcenter in vcenter_servers:
            host_dict = {}
            host_dict['server'] = vcenter_servers[vcenter]['server']
            host_dict['port'] = vcenter_servers[vcenter]['port']
            host_dict['username'] = vcenter_servers[vcenter]['username']
            host_dict['password'] = vcenter_servers[vcenter]['password']
            host_dict['datacenter'] = vcenter_servers[vcenter]['datacenter']
            host_dict['auth'] = vcenter_servers[vcenter]['auth']
            host_dict['cluster'] = vcenter_servers[vcenter]['cluster']
            host_dict['dv_switch'] = vcenter_servers[vcenter]['dv_switch']['dv_switch_name']
            host_dict['dv_switch'] = vcenter_servers[vcenter]['dv_port_group']['dv_portgroup_name']
            sanity_testbed_dict['vcenter_servers'].append(host_dict)

    # get host ipmi list
    if env.has_key('hosts_ipmi'):
        sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi)


    if not getattr(env, 'test', None):
        env.test={}

    # generate json file and copy to cfgm
    sanity_testbed_json = json.dumps(sanity_testbed_dict)
    stack_user = env.test.get('stack_user', os.getenv('STACK_USER') or '')
    stack_password = env.test.get('stack_password',
                         os.getenv('STACK_PASSWORD') or '')
    stack_tenant = env.test.get('stack_tenant', os.getenv('STACK_TENANT') or '')
    tenant_isolation = env.test.get('tenant_isolation',
                           os.getenv('TENANT_ISOLATION') or '')

    stop_on_fail = env.get('stop_on_fail', False)
    mail_to = env.test.get('mail_to', os.getenv('MAIL_TO') or '')
    log_scenario = env.get('log_scenario', 'Sanity')
    stack_region_name = get_region_name()
    admin_user, admin_password = get_authserver_credentials()
    admin_tenant = get_admin_tenant_name()
    # Few hardcoded variables for sanity environment
    # can be removed once we move to python3 and configparser
    stack_domain = env.get('stack_domain', 'default-domain')
    webserver_host = env.test.get('webserver_host',
                         os.getenv('WEBSERVER_HOST') or '')
    webserver_user = env.test.get('webserver_user',
                         os.getenv('WEBSERVER_USER') or '')
    webserver_password = env.test.get('webserver_password',
                             os.getenv('WEBSERVER_PASSWORD') or '')
    webserver_log_path = env.test.get('webserver_log_path',
        os.getenv('WEBSERVER_LOG_PATH') or '/var/www/contrail-test-ci/logs/')
    webserver_report_path = env.test.get('webserver_report_path',
        os.getenv('WEBSERVER_REPORT_PATH') or '/var/www/contrail-test-ci/reports/')
    webroot = env.test.get('webroot',
                  os.getenv('WEBROOT') or 'contrail-test-ci')
    mail_server = env.test.get('mail_server', os.getenv('MAIL_SERVER') or '')
    mail_port = env.test.get('mail_port', os.getenv('MAIL_PORT') or '25')
    fip_pool_name = env.test.get('fip_pool_name',
                        os.getenv('FIP_POOL_NAME') or 'floating-ip-pool')
    public_virtual_network=env.test.get('public_virtual_network',
        os.getenv('PUBLIC_VIRTUAL_NETWORK') or 'public')
    public_tenant_name=env.test.get('public_tenant_name',
                           os.getenv('PUBLIC_TENANT_NAME') or 'admin')
    fixture_cleanup = env.test.get('fixture_cleanup',
                          os.getenv('FIXTURE_CLEANUP') or 'yes')
    generate_html_report = env.test.get('generate_html_report',
        os.getenv('GENERATE_HTML_REPORT') or 'True')
    keypair_name = env.test.get('keypair_name',
                       os.getenv('KEYPAIR_NAME') or 'contrail_key')
    mail_sender = env.test.get('mail_sender',
        os.getenv('MAIL_SENDER') or '*****@*****.**')
    discovery_ip = env.test.get('discovery_ip', os.getenv('DISCOVERY_IP') or '')
    config_api_ip = env.test.get('config_api_ip', os.getenv('CONFIG_API_IP') or '')
    analytics_api_ip = env.test.get('analytics_api_ip',
                           os.getenv('ANALYTICS_API_IP') or '')
    discovery_port = env.test.get('discovery_port',
                                  os.getenv('DISCOVERY_PORT') or '')
    config_api_port = env.test.get('config_api_port',
                                   os.getenv('CONFIG_API_PORT') or '')
    analytics_api_port = env.test.get('analytics_api_port',
                                      os.getenv('ANALYTICS_API_PORT') or '')
    control_port = env.test.get('control_port', os.getenv('CONTROL_PORT') or '')
    dns_port = env.test.get('dns_port', os.getenv('DNS_PORT') or '')
    agent_port = env.test.get('agent_port', os.getenv('AGENT_PORT') or '')
    user_isolation = env.test.get('user_isolation',
                                  bool(os.getenv('USER_ISOLATION') or True))

    use_devicemanager_for_md5 = getattr(testbed, 'use_devicemanager_for_md5', False)
    orch = getattr(env, 'orchestrator', 'openstack')
    router_asn = getattr(testbed, 'router_asn', '')
    public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '')
    public_vn_subnet = getattr(testbed, 'public_vn_subnet', '')
    ext_routers = getattr(testbed, 'ext_routers', '')
    router_info = str(ext_routers)
    test_verify_on_setup = getattr(env, 'test_verify_on_setup', True)
    webui = getattr(testbed, 'webui', False)
    horizon = getattr(testbed, 'horizon', False)
    ui_config = getattr(testbed, 'ui_config', False)
    ui_browser = getattr(testbed, 'ui_browser', False)

    key_filename = env.get('key_filename', '')
    pubkey_filename = env.get('pubkey_filename', '')

    vcenter_dc = ''
    if orch == 'vcenter':
        public_tenant_name='vCenter'

    if env.has_key('vcenter_servers'):
            if env.vcenter_servers:
                for k in env.vcenter_servers:
                    vcenter_dc = env.vcenter_servers[k]['datacenter']

    sanity_params = sanity_ini_templ.safe_substitute(
        {'__testbed_json_file__'   : 'sanity_testbed.json',
         '__nova_keypair_name__'   : keypair_name,
         '__orch__'                : orch,
         '__admin_user__'          : admin_user,
         '__admin_password__'      : admin_password,
         '__admin_tenant__'        : admin_tenant,
         '__tenant_isolation__'    : tenant_isolation,
         '__stack_user__'          : stack_user,
         '__stack_password__'      : stack_password,
         '__auth_ip__'             : auth_server_ip,
         '__auth_port__'           : auth_server_port,
         '__auth_protocol__'       : auth_protocol,
         '__stack_region_name__'   : stack_region_name,
         '__stack_tenant__'        : stack_tenant,
         '__stack_domain__'        : stack_domain,
         '__multi_tenancy__'       : get_mt_enable(),
         '__address_family__'      : get_address_family(),
         '__log_scenario__'        : log_scenario,
         '__generate_html_report__': generate_html_report,
         '__fixture_cleanup__'     : fixture_cleanup,
         '__key_filename__'        : key_filename,
         '__pubkey_filename__'     : pubkey_filename,
         '__webserver__'           : webserver_host,
         '__webserver_user__'      : webserver_user,
         '__webserver_password__'  : webserver_password,
         '__webserver_log_dir__'   : webserver_log_path,
         '__webserver_report_dir__': webserver_report_path,
         '__webroot__'             : webroot,
         '__mail_server__'         : mail_server,
         '__mail_port__'           : mail_port,
         '__sender_mail_id__'      : mail_sender,
         '__receiver_mail_id__'    : mail_to,
         '__http_proxy__'          : env.get('http_proxy', ''),
         '__ui_browser__'          : ui_browser,
         '__ui_config__'           : ui_config,
         '__horizon__'             : horizon,
         '__webui__'               : webui,
         '__devstack__'            : False,
         '__public_vn_rtgt__'      : public_vn_rtgt,
         '__router_asn__'          : router_asn,
         '__router_name_ip_tuples__': router_info,
         '__public_vn_name__'      : fip_pool_name,
         '__public_virtual_network__':public_virtual_network,
         '__public_tenant_name__'  :public_tenant_name,
         '__public_vn_subnet__'    : public_vn_subnet,
         '__test_revision__'       : revision,
         '__fab_revision__'        : fab_revision,
         '__test_verify_on_setup__': test_verify_on_setup,
         '__stop_on_fail__'        : stop_on_fail,
         '__ha_setup__'            : getattr(testbed, 'ha_setup', ''),
         '__ipmi_username__'       : getattr(testbed, 'ipmi_username', ''),
         '__ipmi_password__'       : getattr(testbed, 'ipmi_password', ''),
         '__contrail_internal_vip__' : contrail_internal_vip,
         '__contrail_external_vip__' : contrail_external_vip,
         '__internal_vip__'        : internal_vip,
         '__external_vip__'        : external_vip,
         '__vcenter_dc__'          : vcenter_dc,
         '__vcenter_server__'      : get_vcenter_ip(),
         '__vcenter_port__'        : get_vcenter_port(),
         '__vcenter_username__'    : get_vcenter_username(),
         '__vcenter_password__'    : get_vcenter_password(),
         '__vcenter_datacenter__'  : get_vcenter_datacenter(),
         '__vcenter_compute__'     : get_vcenter_compute(),
         '__use_devicemanager_for_md5__'       : use_devicemanager_for_md5,
         '__discovery_port__'      : discovery_port,
         '__config_api_port__'     : config_api_port,
         '__analytics_api_port__'  : analytics_api_port,
         '__control_port__'        : control_port,
         '__dns_port__'            : dns_port,
         '__vrouter_agent_port__'  : agent_port,
         '__discovery_ip__'        : discovery_ip,
         '__config_api_ip__'       : config_api_ip,
         '__analytics_api_ip__'    : analytics_api_ip,
         '__user_isolation__'      : user_isolation,
        })

    ini_file = test_dir + '/' + 'sanity_params.ini'
    testbed_json_file = test_dir + '/' + 'sanity_testbed.json'
    with open(ini_file, 'w') as ini:
        ini.write(sanity_params)

    with open(testbed_json_file,'w') as tb:
        tb.write(sanity_testbed_json)

    # Create /etc/contrail/openstackrc
    if not os.path.exists('/etc/contrail'):
        os.makedirs('/etc/contrail')

    with open('/etc/contrail/openstackrc','w') as rc:
        rc.write("export OS_USERNAME=%s\n" % admin_user)
        rc.write("export OS_PASSWORD=%s\n" % admin_password)
        rc.write("export OS_TENANT_NAME=%s\n" % admin_tenant)
        rc.write("export OS_REGION_NAME=%s\n" % stack_region_name)
        rc.write("export OS_AUTH_URL=%s://%s:%s/v2.0\n" % (auth_protocol,
                                                           auth_server_ip,
                                                           auth_server_port))
        rc.write("export OS_NO_CACHE=1\n")

    # Write vnc_api_lib.ini - this is required for vnc_api to connect to keystone
    config = ConfigParser.ConfigParser()
    config.optionxform = str
    vnc_api_ini = '/etc/contrail/vnc_api_lib.ini'
    if os.path.exists(vnc_api_ini):
        config.read(vnc_api_ini)

    if 'auth' not in config.sections():
        config.add_section('auth')

    config.set('auth','AUTHN_TYPE', 'keystone')
    config.set('auth','AUTHN_PROTOCOL', auth_protocol)
    config.set('auth','AUTHN_SERVER', auth_server_ip)
    config.set('auth','AUTHN_PORT', auth_server_port)
    config.set('auth','AUTHN_URL', '/v2.0/tokens')

    with open(vnc_api_ini,'w') as f:
        config.write(f)

    # If webui = True, in testbed, setup webui for sanity
    if webui:
        install_webui_packages(testbed)
        update_config_option('openstack', '/etc/keystone/keystone.conf',
                             'token', 'expiration',
                             '86400','keystone')
        update_js_config('openstack', '/etc/contrail/config.global.js',
                         'contrail-webui')
Esempio n. 41
0
def join_galera_cluster(new_ctrl_host):
    """Task to join a new node into an existing Galera cluster.

    Runs in three phases:
      1. On every existing openstack node, grant mysql permission for the
         joining node (``add-mysql-perm``).
      2. On the joining node itself, run ``setup-vnc-galera`` so it comes
         up as a member of the cluster.
      3. On every existing openstack node, regenerate the galera
         configuration to include the new member (``add-galera-config``).

    :param new_ctrl_host: fabric host string of the node being added; it
        is expected to already be present in env.roledefs['openstack']
        (its position there is used for --openstack_index).
    """
    # Passwordless ssh between all openstack nodes is a prerequisite for
    # the cluster tooling invoked below.
    execute('setup_passwordless_ssh', *env.roledefs['openstack'])

    # Adding the user permission for the node to be added in
    # the other nodes.
    new_ctrl_data_host_string = get_control_host_string(new_ctrl_host)
    # Control-data host strings are in 'user@ip' form; take the address
    # after the '@'.
    new_ctrl_ip = new_ctrl_data_host_string.split('@')[1]

    # Phase 1: grant mysql access for the new node on every peer.
    for host_string in env.roledefs['openstack']:
        if host_string != new_ctrl_host:
            with settings(host_string=host_string):
                cmd = "add-mysql-perm --node_to_add %s" % new_ctrl_ip
                sudo(cmd)

    openstack_host_list = [get_control_host_string(openstack_host)\
                           for openstack_host in env.roledefs['openstack']]

    galera_ip_list = [hstr_to_ip(galera_host)\
                      for galera_host in openstack_host_list]

    authserver_ip = get_authserver_ip()
    internal_vip = get_openstack_internal_vip()
    external_vip = get_openstack_external_vip()

    # Phase 2: provision galera on the joining node itself.
    with settings(host_string = new_ctrl_host):
        # Zookeeper runs on the 'database' role nodes.
        zoo_ip_list = [hstr_to_ip(get_control_host_string(\
                        cassandra_host)) for cassandra_host in env.roledefs['database']]

        # Galera monitoring (cmon) is only enabled when an internal VIP
        # is configured, i.e. in an HA deployment.
        monitor_galera="False"
        if get_openstack_internal_vip():
            monitor_galera="True"

        # NOTE(review): these credential values look like redacted
        # placeholders ("******") -- confirm the real user/password
        # values are supplied here at deploy time.
        cmon_db_user="******"
        cmon_db_pass="******"
        keystone_db_user="******"
        keystone_db_pass="******"

        # NOTE: the backslash-continuations below are *inside* the string
        # literal, so the leading indentation of each continued line is
        # part of the command text -- do not re-indent.
        # --openstack_index is the node's 1-based position in the
        # openstack role list.
        cmd = "setup-vnc-galera\
            --self_ip %s --keystone_ip %s --galera_ip_list %s\
            --internal_vip %s --openstack_index %d --zoo_ip_list %s --keystone_user %s\
            --keystone_pass %s --cmon_user %s --cmon_pass %s --monitor_galera %s" % (new_ctrl_ip,
            authserver_ip, ' '.join(galera_ip_list), internal_vip,
            (openstack_host_list.index(new_ctrl_data_host_string) + 1), ' '.join(zoo_ip_list),
            keystone_db_user, keystone_db_pass, cmon_db_user, cmon_db_pass, monitor_galera)

        if external_vip:
            cmd += ' --external_vip %s' % external_vip
        sudo(cmd)

    # Phase 3: push the updated member list to every pre-existing node.
    # zoo_ip_list (and the db credential locals) assigned in the block
    # above are still in scope here.
    for host_string in env.roledefs['openstack']:
        if host_string != new_ctrl_host:
            with settings(host_string=host_string):
                self_host = get_control_host_string(env.host_string)
                self_ip = hstr_to_ip(self_host)

                cmd = "add-galera-config\
                     --node_to_add %s\
                     --self_ip %s\
                     --keystone_ip %s\
                     --zoo_ip_list %s\
                     --keystone_user %s\
                     --keystone_pass %s\
                     --cmon_user %s\
                     --cmon_pass %s\
                     --monitor_galera %s\
                     --galera_ip_list %s\
                     --internal_vip %s\
                     --openstack_index %d" % (new_ctrl_ip,
                     self_ip, authserver_ip,
                     ' '.join(zoo_ip_list), 
                     keystone_db_user, keystone_db_pass,
                     cmon_db_user, cmon_db_pass, monitor_galera, 
                     ' '.join(galera_ip_list), internal_vip,
                     (openstack_host_list.index(self_host) + 1))
                sudo(cmd)