Example #1
def setup_keepalived():
    """Task to provision VIP for openstack nodes with keepalived"""
    mgmt_ip = hstr_to_ip(env.host_string)
    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)
    openstack_host_password = env.passwords[env.host_string]

    if (getattr(env, 'openstack_admin_password', None)):
        openstack_admin_password = env.openstack_admin_password
    else:
        openstack_admin_password = '******'

    internal_vip = get_from_testbed_dict('ha', 'internal_vip', None)
    external_vip = get_from_testbed_dict('ha', 'external_vip', None)
    openstack_host_list = [get_control_host_string(openstack_host)\
                           for openstack_host in env.roledefs['openstack']]

    with cd(INSTALLER_DIR):
        cmd = "PASSWORD=%s ADMIN_TOKEN=%s python setup-vnc-keepalived.py\
               --self_ip %s --internal_vip %s --mgmt_self_ip %s\
               --openstack_index %d" % (
            openstack_host_password, openstack_admin_password, self_ip,
            internal_vip, mgmt_ip, (openstack_host_list.index(self_host) + 1))
        if external_vip:
            cmd += ' --external_vip %s' % external_vip
        run(cmd)
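Every task in this listing reads its tunables through get_from_testbed_dict. For readers without the surrounding repo, here is a minimal sketch of what that helper plausibly does, assuming the testbed module (imported elsewhere in the fabfile) exposes plain dicts such as ha and cfgm:

def get_from_testbed_dict(dictionary, key, default_value):
    # Sketch only: look up 'key' in the named dict of the testbed module
    # and fall back to 'default_value' when the dict or key is absent.
    try:
        val = getattr(testbed, dictionary)[key]
    except (AttributeError, KeyError):
        val = default_value
    return val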
Example #2
def setup_keepalived():
    """Task to provision VIP for openstack nodes with keepalived"""
    mgmt_ip = hstr_to_ip(env.host_string)
    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)
    openstack_host_password = env.passwords[env.host_string]
    
    if (getattr(env, 'openstack_admin_password', None)):
        openstack_admin_password = env.openstack_admin_password
    else:
        openstack_admin_password = '******'
        
    internal_vip = get_from_testbed_dict('ha', 'internal_vip', None)
    external_vip = get_from_testbed_dict('ha', 'external_vip', None)
    openstack_host_list = [get_control_host_string(openstack_host)\
                           for openstack_host in env.roledefs['openstack']]
    myindex = openstack_host_list.index(self_host)
    if myindex >= 1:
        # Wait for the VIP to be associated with the MASTER VRRP instance.
        with settings(host_string=env.roledefs['openstack'][0], warn_only=True):
            while run("ip addr | grep %s" % internal_vip).failed:
                sleep(2)
                print "Waiting for VIP to be associated to MASTER VRRP."
                continue
 
    with cd(INSTALLER_DIR):
        cmd = "PASSWORD=%s ADMIN_TOKEN=%s python setup-vnc-keepalived.py\
               --self_ip %s --internal_vip %s --mgmt_self_ip %s\
               --openstack_index %d --num_nodes %d" % (openstack_host_password,
               openstack_admin_password, self_ip, internal_vip, mgmt_ip,
               (openstack_host_list.index(self_host) + 1), len(env.roledefs['openstack']))
        if external_vip:
            cmd += ' --external_vip %s' % external_vip
        run(cmd)
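Example #2 polls the first openstack node until the internal VIP shows up in "ip addr" before provisioning the backup nodes. The 'ha' keys it reads would come from a testbed.py stanza along these lines (the addresses are placeholders, shown only to make the lookups concrete):

# Hypothetical testbed.py fragment; every value is illustrative.
ha = {
    'internal_vip': '192.168.10.5',   # VIP on the control/data network
    'external_vip': '10.10.10.5',     # optional VIP on the management network
}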
Example #3
def setup_keepalived():
    """Task to provision VIP for openstack nodes with keepalived"""
    mgmt_ip = hstr_to_ip(env.host_string)
    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)
    openstack_host_password = env.passwords[env.host_string]
    
    if (getattr(env, 'openstack_admin_password', None)):
        openstack_admin_password = env.openstack_admin_password
    else:
        openstack_admin_password = '******'
        
    internal_vip = get_from_testbed_dict('ha', 'internal_vip', None)
    external_vip = get_from_testbed_dict('ha', 'external_vip', None)
    openstack_host_list = [get_control_host_string(openstack_host)\
                           for openstack_host in env.roledefs['openstack']]
 
    with cd(INSTALLER_DIR):
        cmd = "PASSWORD=%s ADMIN_TOKEN=%s python setup-vnc-keepalived.py\
               --self_ip %s --internal_vip %s --mgmt_self_ip %s\
               --openstack_index %d" % (openstack_host_password,
               openstack_admin_password, self_ip, internal_vip, mgmt_ip,
               (openstack_host_list.index(self_host) + 1))
        if external_vip:
            cmd += ' --external_vip %s' % external_vip
        run(cmd)
Example #5
def mount_glance_images():
    nfs_server = get_from_testbed_dict('ha', 'nfs_server', hstr_to_ip(get_nfs_server()))
    nfs_glance_path = get_from_testbed_dict('ha', 'nfs_glance_path', '/var/tmp/glance-images/')
    with settings(warn_only=True):
        out = sudo('mount %s:%s /var/lib/glance/images' % (nfs_server, nfs_glance_path))
        if out.failed and 'already mounted' not in out:
            raise RuntimeError(out)
        if sudo('grep "%s:%s /var/lib/glance/images nfs" /etc/fstab' % (nfs_server, nfs_glance_path)).failed:
            sudo('echo "%s:%s /var/lib/glance/images nfs nfsvers=3,hard,intr,auto 0 0" >> /etc/fstab' % (nfs_server, nfs_glance_path))
Example #6
def mount_glance_images():
    nfs_server = get_from_testbed_dict('ha', 'nfs_server', hstr_to_ip(env.roledefs['compute'][0]))
    nfs_glance_path = get_from_testbed_dict('ha', 'nfs_glance_path', '/var/tmp/glance-images/')
    with settings(warn_only=True):
        out = run('sudo mount %s:%s /var/lib/glance/images' % (nfs_server, nfs_glance_path))
        if out.failed and 'already mounted' not in out:
            raise RuntimeError(out)
        if run('grep "%s:%s /var/lib/glance/images nfs" /etc/fstab' % (nfs_server, nfs_glance_path)).failed:
            run('echo "%s:%s /var/lib/glance/images nfs nfsvers=3,hard,intr,auto 0 0" >> /etc/fstab' % (nfs_server, nfs_glance_path))
Example #7
def setup_rabbitmq_cluster(force=False):
    """Task to cluster the rabbit servers."""
    amqp_roles = []
    rabbit_servers = get_from_testbed_dict('cfgm', 'amqp_hosts', None)
    if rabbit_servers:
        print "Using external rabbitmq servers %s" % rabbit_servers
    else:
        # Provision rabbitmq cluster in cfgm role nodes.
        print "Provisioning rabbitq in cfgm nodes"
        amqp_roles = ['cfgm']

    # Provision rabbitmq cluster in openstack on request
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        # Provision rabbitmq cluster in openstack role nodes as well.
        amqp_roles.append('openstack')

    for role in amqp_roles:
        env.roledefs['rabbit'] = env.roledefs[role]

        if not force:
            with settings(warn_only=True):
                result = execute("verify_cluster_status", retry='no')
            if result and False not in result.values():
                print "RabbitMQ cluster is up and running in role[%s]; No need to cluster again." % role
                continue

        rabbitmq_cluster_uuid = getattr(testbed, 'rabbitmq_cluster_uuid', None)
        if not rabbitmq_cluster_uuid:
            rabbitmq_cluster_uuid = uuid.uuid4()

        if not is_xenial_or_above():
            execute(listen_at_supervisor_support_port)
        execute(remove_mnesia_database)
        execute(verify_rabbit_node_hostname)
        execute(allow_rabbitmq_port)
        execute(rabbitmq_env)
        execute(config_rabbitmq)
        execute("stop_rabbitmq_and_set_cookie", rabbitmq_cluster_uuid)
        execute(start_rabbitmq)
        # Sleep to work around RabbitMQ bug 26370, which can cause
        # "rabbitmqctl cluster_status" to break the database; seen in CI.
        time.sleep(60)
        #execute(rabbitmqctl_stop_app)
        #execute(rabbitmqctl_reset)
        #execute("rabbitmqctl_start_app_node", env.roledefs['rabbit'][0])
        #execute(add_node_to_rabbitmq_cluster)
        #execute(rabbitmqctl_start_app)
        if (role == 'openstack' and get_openstack_internal_vip() or
            role == 'cfgm' and get_contrail_internal_vip()):
            execute('set_ha_policy_in_rabbitmq')
            execute('set_tcp_keepalive')
            execute('set_tcp_keepalive_on_compute')
        result = execute(verify_cluster_status)
        if False in result.values():
            print "Unable to setup RabbitMQ cluster in role[%s]...." % role
            exit(1)
Example #8
def setup_rabbitmq_cluster(force=False):
    """Task to cluster the rabbit servers."""
    amqp_roles = []
    rabbit_servers = get_from_testbed_dict('cfgm', 'amqp_hosts', None)
    if rabbit_servers:
        print "Using external rabbitmq servers %s" % rabbit_servers
    else:
        # Provision rabbitmq cluster in cfgm role nodes.
        print "Provisioning rabbitq in cfgm nodes"
        amqp_roles = ['cfgm']

    # Provision rabbitmq cluster in openstack on request
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        # Provision rabbitmq cluster in openstack role nodes as well.
        amqp_roles.append('openstack')

    for role in amqp_roles:
        env.roledefs['rabbit'] = env.roledefs[role]

        if not force:
            with settings(warn_only=True):
                result = execute("verify_cluster_status", retry='no')
            if result and False not in result.values():
                print "RabbitMQ cluster is up and running in role[%s]; No need to cluster again." % role
                continue

        rabbitmq_cluster_uuid = getattr(testbed, 'rabbitmq_cluster_uuid', None)
        if not rabbitmq_cluster_uuid:
            rabbitmq_cluster_uuid = uuid.uuid4()

        execute(listen_at_supervisor_support_port)
        execute(remove_mnesia_database)
        execute(verify_rabbit_node_hostname)
        execute(allow_rabbitmq_port)
        execute(rabbitmq_env)
        execute(config_rabbitmq)
        execute("stop_rabbitmq_and_set_cookie", rabbitmq_cluster_uuid)
        execute(start_rabbitmq)
        # Sleep to work around RabbitMQ bug 26370, which can cause
        # "rabbitmqctl cluster_status" to break the database; seen in CI.
        time.sleep(60)
        #execute(rabbitmqctl_stop_app)
        #execute(rabbitmqctl_reset)
        #execute("rabbitmqctl_start_app_node", env.roledefs['rabbit'][0])
        #execute(add_node_to_rabbitmq_cluster)
        #execute(rabbitmqctl_start_app)
        if (role == 'openstack' and get_openstack_internal_vip() or
            role == 'cfgm' and get_contrail_internal_vip()):
            execute('set_ha_policy_in_rabbitmq')
            execute('set_tcp_keepalive')
            execute('set_tcp_keepalive_on_compute')
        result = execute(verify_cluster_status)
        if False in result.values():
            print "Unable to setup RabbitMQ cluster in role[%s]...." % role
            exit(1)
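The rabbitmq tasks above repoint env.roledefs['rabbit'] at whichever role is being clustered. For context, the Fabric roledefs these scripts assume look roughly like this (hosts are hypothetical):

from fabric.api import env

# Hypothetical env.roledefs layout consumed by the tasks in this listing.
env.roledefs = {
    'cfgm':      ['root@10.0.0.1', 'root@10.0.0.2', 'root@10.0.0.3'],
    'openstack': ['root@10.0.0.1', 'root@10.0.0.2', 'root@10.0.0.3'],
    'compute':   ['root@10.0.0.4', 'root@10.0.0.5'],
}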
Example #9
def setup_glance_images_loc():
    nfs_server = get_from_testbed_dict('ha', 'nfs_server', None)
    nfs_glance_path = get_from_testbed_dict('ha', 'nfs_glance_path', '/var/tmp/glance-images/')
    if not nfs_server:
        with settings(host_string=env.roledefs['compute'][0]):
            run('mkdir -p /var/tmp/glance-images/')
            run('chmod 777 /var/tmp/glance-images/')
            run('echo "/var/tmp/glance-images *(rw,sync,no_subtree_check)" >> /etc/exports')
            run('sudo /etc/init.d/nfs-kernel-server restart')
    execute('mount_glance_images')
Example #11
def setup_glance_images_loc():
    nfs_server = get_from_testbed_dict("ha", "nfs_server", None)
    nfs_glance_path = get_from_testbed_dict("ha", "nfs_glance_path", "/var/tmp/glance-images/")
    if not nfs_server:
        nfs_server = get_nfs_server()
        with settings(host_string=nfs_server):
            sudo("mkdir -p /var/tmp/glance-images/")
            sudo("chmod 777 /var/tmp/glance-images/")
            sudo('echo "/var/tmp/glance-images *(rw,sync,no_subtree_check)" >> /etc/exports')
            sudo("sudo /etc/init.d/nfs-kernel-server restart")
    execute("mount_glance_images")
Example #12
def setup_galera_cluster():
    """Task to cluster the openstack nodes with galera"""
    if len(env.roledefs['openstack']) <= 1:
        print "Single Openstack cluster, skipping galera cluster setup."
        return

    if env.roledefs['openstack'].index(env.host_string) == 0:
        execute('setup_passwordless_ssh', *env.roledefs['openstack'])
    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)
    openstack_host_password = env.passwords[env.host_string]

    if (getattr(env, 'openstack_admin_password', None)):
        openstack_admin_password = env.openstack_admin_password
    else:
        openstack_admin_password = '******'

    openstack_host_list = [get_control_host_string(openstack_host)\
                           for openstack_host in env.roledefs['openstack']]
    galera_ip_list = [hstr_to_ip(galera_host)\
                      for galera_host in openstack_host_list]
    keystone_ip = get_keystone_ip()
    internal_vip = get_from_testbed_dict('ha', 'internal_vip', None)

    with cd(INSTALLER_DIR):
        run("PASSWORD=%s ADMIN_TOKEN=%s python setup-vnc-galera.py\
            --self_ip %s --keystone_ip %s --galera_ip_list %s\
            --internal_vip %s --openstack_index %d" %
            (openstack_host_password, openstack_admin_password, self_ip,
             keystone_ip, ' '.join(galera_ip_list), internal_vip,
             (openstack_host_list.index(self_host) + 1)))
Example #13
def setup_galera_cluster():
    """Task to cluster the openstack nodes with galera"""
    if len(env.roledefs['openstack']) <= 1:
        print "Single Openstack cluster, skipping galera cluster setup."
        return

    if env.roledefs['openstack'].index(env.host_string) == 0:
        execute('setup_passwordless_ssh', *env.roledefs['openstack'])
    self_host = get_control_host_string(env.host_string)
    self_ip = hstr_to_ip(self_host)
    openstack_host_password = env.passwords[env.host_string]

    if (getattr(env, 'openstack_admin_password', None)):
        openstack_admin_password = env.openstack_admin_password
    else:
        openstack_admin_password = '******'

    openstack_host_list = [get_control_host_string(openstack_host)\
                           for openstack_host in env.roledefs['openstack']]
    galera_ip_list = [hstr_to_ip(galera_host)\
                      for galera_host in openstack_host_list]
    keystone_ip = get_keystone_ip()
    internal_vip = get_from_testbed_dict('ha', 'internal_vip', None)

    with cd(INSTALLER_DIR):
        run("PASSWORD=%s ADMIN_TOKEN=%s python setup-vnc-galera.py\
            --self_ip %s --keystone_ip %s --galera_ip_list %s\
            --internal_vip %s --openstack_index %d" % (openstack_host_password,
                openstack_admin_password, self_ip, keystone_ip,
                ' '.join(galera_ip_list), internal_vip,
                (openstack_host_list.index(self_host) + 1)))
Example #14
def setup_rabbitmq_cluster(force=False):
    """Task to cluster the rabbit servers."""
    if len(env.roledefs['cfgm']) <= 1:
        print "Single cfgm cluster, skipping rabbitmq cluster setup."
        return 

    if not force:
        with settings(warn_only=True):
            result = execute(verify_cluster_status)
        if result and False not in result.values():
            print "RabbitMQ cluster is up and running; No need to cluster again."
            return

    rabbitmq_cluster_uuid = getattr(testbed, 'rabbitmq_cluster_uuid', None)
    if not rabbitmq_cluster_uuid:
        rabbitmq_cluster_uuid = uuid.uuid4()

    execute(listen_at_supervisor_config_port)
    execute(remove_mnesia_database)
    execute(verify_cfgm_hostname)
    execute(allow_rabbitmq_port)
    execute(config_rabbitmq)
    execute("stop_rabbitmq_and_set_cookie", rabbitmq_cluster_uuid)
    execute(start_rabbitmq)
    #execute(rabbitmqctl_stop_app)
    #execute(rabbitmqctl_reset)
    #execute("rabbitmqctl_start_app_node", env.roledefs['cfgm'][0])
    #execute(add_cfgm_to_rabbitmq_cluster)
    #execute(rabbitmqctl_start_app)
    if get_from_testbed_dict('ha', 'internal_vip', None):
        execute('set_ha_policy_in_rabbitmq')
    result = execute(verify_cluster_status)
    if False in result.values():
        print "Unable to setup RabbitMQ cluster...."
        exit(1)
Example #15
def setup_ha():
    execute('pre_check')
    if get_from_testbed_dict('ha', 'internal_vip', None):
        print "Multi Openstack setup, provisioning openstack HA."
        execute('setup_keepalived')
        execute('setup_galera_cluster')
        execute('fix_wsrep_cluster_address')
        execute('fixup_restart_haproxy_in_openstack')
        execute('setup_glance_images_loc')
        execute('fix_memcache_conf')
        execute('tune_tcp')
Example #16
def install_openstack_node(*args):
    """Installs openstack pkgs in one or list of nodes. USAGE:fab install_openstack_node:[email protected],[email protected]"""
    for host_string in args:
        with settings(host_string=host_string):
            pkg = ['contrail-openstack']
            if len(env.roledefs['openstack']) > 1 and get_from_testbed_dict('ha', 'internal_vip', None):
                pkg.append('contrail-openstack-ha')
            if detect_ostype() == 'Ubuntu':
                apt_install(pkg)
            else:
                yum_install(pkg)
Example #17
def install_openstack_node(*args):
    """Installs openstack pkgs in one or list of nodes. USAGE:fab install_openstack_node:[email protected],[email protected]"""
    for host_string in args:
        with settings(host_string=host_string):
            pkg = ['contrail-openstack']
            if len(env.roledefs['openstack']) > 1 and get_from_testbed_dict(
                    'ha', 'internal_vip', None):
                pkg.append('contrail-openstack-ha')
            if detect_ostype() == 'Ubuntu':
                apt_install(pkg)
            else:
                yum_install(pkg)
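install_openstack_node dispatches to apt_install or yum_install depending on detect_ostype(). Those helpers live elsewhere in the repo; a minimal sketch of the behavior they would need here (the real versions likely add repository options and retries):

from fabric.api import sudo

# Hypothetical minimal installers matching the calls above.
def apt_install(pkgs):
    sudo('DEBIAN_FRONTEND=noninteractive apt-get -y install %s' % ' '.join(pkgs))

def yum_install(pkgs):
    sudo('yum -y install %s' % ' '.join(pkgs))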
Example #18
def setup_ha():
    execute('pre_check')
    if get_from_testbed_dict('ha', 'internal_vip', None):
        print "Multi Openstack setup, provisioning openstack HA."
        execute('setup_keepalived')
        execute('setup_galera_cluster')
        execute('fix_wsrep_cluster_address')
        execute('fix_restart_xinetd_conf')
        execute('fixup_restart_haproxy_in_openstack')
        execute('fixup_restart_haproxy_in_collector')
        execute('setup_glance_images_loc')
        execute('fix_memcache_conf')
        execute('tune_tcp')
        execute('fix_cmon_param_and_add_keys_to_compute')
        execute('create_and_copy_service_token')
Example #20
def fix_cmon_param_and_add_keys_to_compute():
    cmon_param = '/etc/contrail/ha/cmon_param'
    compute_host_list = []
    for host_string in env.roledefs['compute']:
        with settings(host_string=host_string,
                      password=env.passwords[host_string]):
            host_name = sudo('hostname')
        compute_host_list.append(host_name)

    # Get AMQP host list
    amqp_in_role = 'cfgm'
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        amqp_in_role = 'openstack'
    amqp_host_list = []
    for host_string in env.roledefs[amqp_in_role]:
        with settings(host_string=host_string,
                      password=env.passwords[host_string]):
            host_name = sudo('hostname')
        amqp_host_list.append(host_name)

    computes = 'COMPUTES=("' + '" "'.join(compute_host_list) + '")'
    sudo("echo '%s' >> %s" % (computes, cmon_param))
    sudo("echo 'COMPUTES_SIZE=${#COMPUTES[@]}' >> %s" % cmon_param)
    sudo("echo 'COMPUTES_USER=root' >> %s" % cmon_param)
    sudo("echo 'PERIODIC_RMQ_CHK_INTER=60' >> %s" % cmon_param)
    sudo("echo 'RABBITMQ_RESET=True' >> %s" % cmon_param)
    amqps = 'DIPHOSTS=("' + '" "'.join(amqp_host_list) + '")'
    sudo("echo '%s' >> %s" % (amqps, cmon_param))
    sudo("echo 'DIPS_HOST_SIZE=${#DIPHOSTS[@]}' >> %s" % cmon_param)
    id_rsa_pubs = {}
    if files.exists('~/.ssh', use_sudo=True):
        sudo('chmod 700 ~/.ssh')
    if (not files.exists('~/.ssh/id_rsa', use_sudo=True)
            and not files.exists('~/.ssh/id_rsa.pub', use_sudo=True)):
        sudo('ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ""')
    elif (not files.exists('~/.ssh/id_rsa', use_sudo=True)
          or not files.exists('~/.ssh/id_rsa.pub', use_sudo=True)):
        sudo('rm -rf ~/.ssh/id_rsa*')
        sudo('ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ""')
    id_rsa_pubs.update({env.host_string: sudo('cat ~/.ssh/id_rsa.pub')})
    for host_string in env.roledefs['compute']:
        with settings(host_string=host_string):
            sudo("mkdir -p ~/.ssh/")
            for host, id_rsa_pub in id_rsa_pubs.items():
                files.append('~/.ssh/authorized_keys',
                             id_rsa_pub,
                             use_sudo=True)
            sudo('chmod 640 ~/.ssh/authorized_keys')
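Example #20 appends its cmon_param settings unconditionally, so re-running the task duplicates lines; Example #21 below guards each echo with grep -q and dedupes the file. That guard is mechanical enough to factor out; a hypothetical helper capturing the same pattern:

def append_if_missing(marker, line, path):
    # Sketch: append 'line' to 'path' only when 'marker' is not already there.
    sudo("grep -q '%s' %s || echo '%s' >> %s" % (marker, path, line, path))

# e.g. append_if_missing('COMPUTES_USER', 'COMPUTES_USER=root', cmon_param)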
Example #21
def fix_cmon_param_and_add_keys_to_compute():
    cmon_param = '/etc/contrail/ha/cmon_param'
    compute_host_list = []
    for host_string in env.roledefs['compute']:
        with settings(host_string=host_string, password=get_env_passwords(host_string)):
            host_name = sudo('hostname')
        compute_host_list.append(host_name)

    # Get AMQP host list
    amqp_in_role = 'cfgm'
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        amqp_in_role = 'openstack'
    amqp_host_list = []
    for host_string in env.roledefs[amqp_in_role]:
        with settings(host_string=host_string, password=get_env_passwords(host_string)):
            host_name = sudo('hostname -s')
        amqp_host_list.append(host_name)

    computes = 'COMPUTES=("' + '" "'.join(compute_host_list) + '")'
    sudo("grep -q 'COMPUTES' %s || echo '%s' >> %s" % (cmon_param, computes, cmon_param))
    sudo("grep -q 'COMPUTES_SIZE' %s || echo 'COMPUTES_SIZE=${#COMPUTES[@]}' >> %s" % (cmon_param, cmon_param))
    sudo("grep -q 'COMPUTES_USER' %s || echo 'COMPUTES_USER=root' >> %s" % (cmon_param, cmon_param))
    sudo("grep -q 'PERIODIC_RMQ_CHK_INTER' %s || echo 'PERIODIC_RMQ_CHK_INTER=60' >> %s" % (cmon_param, cmon_param))
    sudo("grep -q 'RABBITMQ_RESET' %s || echo 'RABBITMQ_RESET=True' >> %s" % (cmon_param, cmon_param))
    amqps = 'DIPHOSTS=("' + '" "'.join(amqp_host_list) + '")'
    sudo("grep -q 'DIPHOSTS' %s || echo '%s' >> %s" % (cmon_param, amqps, cmon_param))
    sudo("grep -q 'DIPS_HOST_SIZE' %s || echo 'DIPS_HOST_SIZE=${#DIPHOSTS[@]}' >> %s" % (cmon_param, cmon_param))
    sudo("sort %s | uniq > /tmp/cmon_param" % cmon_param)
    sudo("mv /tmp/cmon_param %s" % cmon_param)
    id_rsa_pubs = {}
    if files.exists('~/.ssh', use_sudo=True):
        sudo('chmod 700 ~/.ssh')
    if (not files.exists('~/.ssh/id_rsa', use_sudo=True) and
        not files.exists('~/.ssh/id_rsa.pub', use_sudo=True)):
        sudo('ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ""')
    elif (not files.exists('~/.ssh/id_rsa', use_sudo=True) or
         not files.exists('~/.ssh/id_rsa.pub', use_sudo=True)):
        sudo('rm -rf ~/.ssh/id_rsa*')
        sudo('ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ""')
    id_rsa_pubs.update({env.host_string : sudo('cat ~/.ssh/id_rsa.pub')})
    for host_string in env.roledefs['compute']:
        with settings(host_string=host_string):
            sudo("mkdir -p ~/.ssh/")
            for host, id_rsa_pub in id_rsa_pubs.items():
                files.append('~/.ssh/authorized_keys',
                             id_rsa_pub, use_sudo=True)
            sudo('chmod 640 ~/.ssh/authorized_keys')
Example #22
def setup_rabbitmq_cluster(force=False):
    """Task to cluster the rabbit servers."""
    # Provision rabbitmq cluster in cfgm role nodes.
    amqp_roles = ['cfgm']
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        # Provision rabbitmq cluster in openstack role nodes as well.
        amqp_roles.append('openstack')
    for role in amqp_roles:
        env.roledefs['rabbit'] = env.roledefs[role]

        if not force:
            with settings(warn_only=True):
                result = execute(verify_cluster_status)
            if result and False not in result.values():
                print "RabbitMQ cluster is up and running in role[%s]; No need to cluster again." % role
                continue

        rabbitmq_cluster_uuid = getattr(testbed, 'rabbitmq_cluster_uuid', None)
        if not rabbitmq_cluster_uuid:
            rabbitmq_cluster_uuid = uuid.uuid4()

        execute(listen_at_supervisor_config_port)
        execute(remove_mnesia_database)
        execute(verify_rabbit_node_hostname)
        execute(allow_rabbitmq_port)
        execute(config_rabbitmq)
        execute("stop_rabbitmq_and_set_cookie", rabbitmq_cluster_uuid)
        execute(start_rabbitmq)
        if len(env.roledefs['rabbit']) <= 1:
            print "Single cfgm cluster, Starting rabbitmq."
            return
        #execute(rabbitmqctl_stop_app)
        #execute(rabbitmqctl_reset)
        #execute("rabbitmqctl_start_app_node", env.roledefs['rabbit'][0])
        #execute(add_node_to_rabbitmq_cluster)
        #execute(rabbitmqctl_start_app)
        if (role == 'openstack' and get_openstack_internal_vip() or
            role == 'cfgm' and get_contrail_internal_vip()):
            execute('set_ha_policy_in_rabbitmq')
            execute('set_tcp_keepalive')
            execute('set_tcp_keepalive_on_compute')
        result = execute(verify_cluster_status)
        if False in result.values():
            print "Unable to setup RabbitMQ cluster in role[%s]...." % role
            exit(1)
Example #23
def join_rabbitmq_cluster(new_ctrl_host):
    """ Task to join a new rabbit server into an existing cluster """
    # Provision rabbitmq cluster in cfgm role nodes.
    amqp_roles = ['cfgm']
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        # Provision rabbitmq cluster in openstack role nodes as well.
        amqp_roles.append('openstack')
    for role in amqp_roles:
        env.roledefs['rabbit'] = env.roledefs[role]

        # copy the erlang cookie from one of the other nodes.
        rabbitmq_cluster_uuid = None
        for host_string in env.roledefs['rabbit']:
            with settings(host_string=host_string, warn_only=True):
                if host_string != new_ctrl_host and\
                   sudo('ls /var/lib/rabbitmq/.erlang.cookie').succeeded:
                    rabbitmq_cluster_uuid = \
                        sudo('cat /var/lib/rabbitmq/.erlang.cookie')
                    break
        if rabbitmq_cluster_uuid is None:
            raise RuntimeError("Not able to get the Erlang cookie from the cluster nodes")

        if not is_xenial_or_above():
            execute(listen_at_supervisor_support_port_node, new_ctrl_host)
        execute(remove_mnesia_database_node, new_ctrl_host)
        execute(verify_rabbit_node_hostname)
        execute(allow_rabbitmq_port_node, new_ctrl_host)
        execute(rabbitmq_env)
        execute(config_rabbitmq)
        execute('stop_rabbitmq_and_set_cookie_node', rabbitmq_cluster_uuid, new_ctrl_host)
        execute('start_rabbitmq_node', new_ctrl_host)
        # adding sleep to workaround rabbitmq bug 26370 prevent
        # "rabbitmqctl cluster_status" from breaking the database,
        # this is seen in ci
        time.sleep(30)
        if (role == 'openstack' and get_openstack_internal_vip() or
            role == 'cfgm' and get_contrail_internal_vip()):
            execute('set_ha_policy_in_rabbitmq')
            execute('set_tcp_keepalive')

        result = execute(verify_cluster_status)
        if False in result.values():
            print "Unable to setup RabbitMQ cluster in role[%s]...." % role
            exit(1)
Example #24
def purge_node_from_rabbitmq_cluster(del_rabbitmq_node, role):

    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'no' and\
                             role == 'openstack':
        # We are not managing the RabbitMQ server. No-op.
        return

    if get_contrail_internal_vip() != get_openstack_internal_vip() and\
       role == 'cfgm':
        # Openstack and Contrail are in two different nodes. Cfgm
        # rabbitmq will point to the Openstack node. No-op.
        return

    env.roledefs['rabbit'] = env.roledefs[role]
    del_rabbitmq_ip = hstr_to_ip(del_rabbitmq_node)
    del_rabbitmq_ctrl_ip = hstr_to_ip(get_control_host_string(del_rabbitmq_node))
    if ping_test(del_rabbitmq_node):
        with settings(host_string=del_rabbitmq_node, warn_only=True):
            sudo("rabbitmqctl stop_app")
            sudo("rabbitmqctl reset")
            sudo("service supervisor-support-service stop")
            sudo("mv /var/lib/rabbitmq/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie.removed")
            sudo("mv /etc/rabbitmq/rabbitmq.config /etc/rabbitmq/rabbitmq.config.removed")
    else:
        # If the node is not reachable, then delete the node remotely from one
        # of the nodes in the cluster.
        with settings(host_string=env.roledefs['rabbit'][0], warn_only=True):
            hostname = local("getent hosts %s | awk '{print $3}'" % del_rabbitmq_ctrl_ip, capture=True)
            sudo("rabbitmqctl forget_cluster_node rabbit@%s" % hostname)

    # Give the other nodes some time to re-adjust the cluster.
    time.sleep(30)

    execute(config_rabbitmq)
    for host_string in env.roledefs[role]:
        with settings(host_string=host_string):
            sudo("service rabbitmq-server restart")
            # Give time for RabbitMQ to recluster
            time.sleep(30)

    result = execute(verify_cluster_status)
    if False in result.values():
        print "Unable to recluster RabbitMQ cluster after removing the node %s" % del_rabbitmq_node
        exit(1)
Example #25
def setup_rabbitmq_cluster(force=False):
    """Task to cluster the rabbit servers."""
    # Provision rabbitmq cluster in cfgm role nodes.
    amqp_roles = ["cfgm"]
    if get_from_testbed_dict("openstack", "manage_amqp", "no") == "yes":
        # Provision rabbitmq cluster in openstack role nodes as well.
        amqp_roles.append("openstack")
    for role in amqp_roles:
        env.roledefs["rabbit"] = env.roledefs[role]

        if not force:
            with settings(warn_only=True):
                result = execute("verify_cluster_status", retry="no")
            if result and False not in result.values():
                print "RabbitMQ cluster is up and running in role[%s]; No need to cluster again." % role
                continue

        rabbitmq_cluster_uuid = getattr(testbed, "rabbitmq_cluster_uuid", None)
        if not rabbitmq_cluster_uuid:
            rabbitmq_cluster_uuid = uuid.uuid4()

        execute(listen_at_supervisor_support_port)
        execute(remove_mnesia_database)
        execute(verify_rabbit_node_hostname)
        execute(allow_rabbitmq_port)
        execute(rabbitmq_env)
        execute(config_rabbitmq)
        execute("stop_rabbitmq_and_set_cookie", rabbitmq_cluster_uuid)
        execute(start_rabbitmq)
        # Sleep to work around RabbitMQ bug 26370, which can cause
        # "rabbitmqctl cluster_status" to break the database; seen in CI.
        time.sleep(60)
        # execute(rabbitmqctl_stop_app)
        # execute(rabbitmqctl_reset)
        # execute("rabbitmqctl_start_app_node", env.roledefs['rabbit'][0])
        # execute(add_node_to_rabbitmq_cluster)
        # execute(rabbitmqctl_start_app)
        if role is "openstack" and get_openstack_internal_vip() or role is "cfgm" and get_contrail_internal_vip():
            execute("set_ha_policy_in_rabbitmq")
            execute("set_tcp_keepalive")
            execute("set_tcp_keepalive_on_compute")
        result = execute(verify_cluster_status)
        if False in result.values():
            print "Unable to setup RabbitMQ cluster in role[%s]...." % role
            exit(1)
Example #26
def fix_cmon_param_and_add_keys_to_compute():
    cmon_param = "/etc/contrail/ha/cmon_param"
    compute_host_list = []
    for host_string in env.roledefs["compute"]:
        with settings(host_string=host_string, password=get_env_passwords(host_string)):
            host_name = sudo("hostname")
        compute_host_list.append(host_name)

    # Get AMQP host list
    amqp_in_role = "cfgm"
    if get_from_testbed_dict("openstack", "manage_amqp", "no") == "yes":
        amqp_in_role = "openstack"
    amqp_host_list = []
    for host_string in env.roledefs[amqp_in_role]:
        with settings(host_string=host_string, password=get_env_passwords(host_string)):
            host_name = sudo("hostname")
        amqp_host_list.append(host_name)

    computes = 'COMPUTES=("' + '" "'.join(compute_host_list) + '")'
    sudo("echo '%s' >> %s" % (computes, cmon_param))
    sudo("echo 'COMPUTES_SIZE=${#COMPUTES[@]}' >> %s" % cmon_param)
    sudo("echo 'COMPUTES_USER=root' >> %s" % cmon_param)
    sudo("echo 'PERIODIC_RMQ_CHK_INTER=60' >> %s" % cmon_param)
    sudo("echo 'RABBITMQ_RESET=True' >> %s" % cmon_param)
    amqps = 'DIPHOSTS=("' + '" "'.join(amqp_host_list) + '")'
    sudo("echo '%s' >> %s" % (amqps, cmon_param))
    sudo("echo 'DIPS_HOST_SIZE=${#DIPHOSTS[@]}' >> %s" % cmon_param)
    sudo("sort %s | uniq > /tmp/cmon_param" % cmon_param)
    sudo("mv /tmp/cmon_param %s" % cmon_param)
    id_rsa_pubs = {}
    if files.exists("~/.ssh", use_sudo=True):
        sudo("chmod 700 ~/.ssh")
    if not files.exists("~/.ssh/id_rsa", use_sudo=True) and not files.exists("~/.ssh/id_rsa.pub", use_sudo=True):
        sudo('ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ""')
    elif not files.exists("~/.ssh/id_rsa", use_sudo=True) or not files.exists("~/.ssh/id_rsa.pub", use_sudo=True):
        sudo("rm -rf ~/.ssh/id_rsa*")
        sudo('ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ""')
    id_rsa_pubs.update({env.host_string: sudo("cat ~/.ssh/id_rsa.pub")})
    for host_string in env.roledefs["compute"]:
        with settings(host_string=host_string):
            sudo("mkdir -p ~/.ssh/")
            for host, id_rsa_pub in id_rsa_pubs.items():
                files.append("~/.ssh/authorized_keys", id_rsa_pub, use_sudo=True)
            sudo("chmod 640 ~/.ssh/authorized_keys")
Example #27
def join_rabbitmq_cluster(new_ctrl_host):
    """ Task to join a new rabbit server into an existing cluster """
    # Provision rabbitmq cluster in cfgm role nodes.
    amqp_roles = ['cfgm']
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        # Provision rabbitmq cluster in openstack role nodes as well.
        amqp_roles.append('openstack')
    for role in amqp_roles:
        env.roledefs['rabbit'] = env.roledefs[role]

        # copy the erlang cookie from one of the other nodes.
        rabbitmq_cluster_uuid = None
        for host_string in env.roledefs['rabbit']:
            with settings(host_string=host_string, warn_only=True):
                if host_string != new_ctrl_host and\
                   sudo('ls /var/lib/rabbitmq/.erlang.cookie').succeeded:
                    rabbitmq_cluster_uuid = \
                        sudo('cat /var/lib/rabbitmq/.erlang.cookie')
                    break
        if rabbitmq_cluster_uuid is None:
            raise RuntimeError("Not able to get the Erlang cookie from the cluster nodes")

        execute(listen_at_supervisor_support_port_node, new_ctrl_host)
        execute(remove_mnesia_database_node, new_ctrl_host)
        execute(verify_rabbit_node_hostname)
        execute(allow_rabbitmq_port_node, new_ctrl_host)
        execute(rabbitmq_env)
        execute(config_rabbitmq)
        execute('stop_rabbitmq_and_set_cookie_node', rabbitmq_cluster_uuid, new_ctrl_host)
        execute('start_rabbitmq_node', new_ctrl_host)
        # Sleep to work around RabbitMQ bug 26370, which can cause
        # "rabbitmqctl cluster_status" to break the database; seen in CI.
        time.sleep(30)
        if (role == 'openstack' and get_openstack_internal_vip() or
            role == 'cfgm' and get_contrail_internal_vip()):
            execute('set_ha_policy_in_rabbitmq')
            execute('set_tcp_keepalive')

        result = execute(verify_cluster_status)
        if False in result.values():
            print "Unable to setup RabbitMQ cluster in role[%s]...." % role
            exit(1)
Example #28
def fix_cmon_param_and_add_keys_to_compute():
    cmon_param = '/etc/contrail/ha/cmon_param'
    compute_host_list = []
    for host_string in env.roledefs['compute']:
        with settings(host_string=host_string, password=env.passwords[host_string]):
            host_name = run('hostname')
        compute_host_list.append(host_name)

    # Get AMQP host list
    amqp_in_role = 'cfgm'
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        amqp_in_role = 'openstack'
    amqp_host_list = []
    for host_string in env.roledefs[amqp_in_role]:
        with settings(host_string=host_string, password=env.passwords[host_string]):
            host_name = run('hostname')
        amqp_host_list.append(host_name)

    computes = 'COMPUTES=("' + '" "'.join(compute_host_list) + '")'
    run("echo '%s' >> %s" % (computes, cmon_param))
    run("echo 'COMPUTES_SIZE=${#COMPUTES[@]}' >> %s" % cmon_param)
    run("echo 'COMPUTES_USER=root' >> %s" % cmon_param)
    run("echo 'PERIODIC_RMQ_CHK_INTER=60' >> %s" % cmon_param)
    amqps = 'DIPHOSTS=("' + '" "'.join(amqp_host_list) + '")'
    run("echo '%s' >> %s" % (amqps, cmon_param))
    run("echo 'DIPS_HOST_SIZE=${#DIPHOSTS[@]}' >> %s" % cmon_param)
    id_rsa_pubs = {}
    if files.exists('/root/.ssh'):
        run('chmod 700 /root/.ssh')
    if not files.exists('/root/.ssh/id_rsa') and not files.exists('/root/.ssh/id_rsa.pub'):
        run('ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""')
    elif not files.exists('/root/.ssh/id_rsa') or not files.exists('/root/.ssh/id_rsa.pub'):
        run('rm -rf /root/.ssh/id_rsa*')
        run('ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""')
    id_rsa_pubs.update({env.host_string : run('cat /root/.ssh/id_rsa.pub')})
    for host_string in env.roledefs['compute']:
        with settings(host_string=host_string):
            run("mkdir -p /root/.ssh/")
            for host, id_rsa_pub in id_rsa_pubs.items():
                files.append('/root/.ssh/authorized_keys', id_rsa_pub)
            run('chmod 640 /root/.ssh/authorized_keys')
Example #29
def purge_node_from_rabbitmq_cluster(del_rabbitmq_node, role):

    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'no' and\
                             role == 'openstack':
        # We are not managing the RabbitMQ server. No-op.
        return

    env.roledefs['rabbit'] = env.roledefs[role]
    del_rabbitmq_ip = hstr_to_ip(del_rabbitmq_node)
    del_rabbitmq_ctrl_ip = hstr_to_ip(get_control_host_string(del_rabbitmq_node))
    if ping_test(del_rabbitmq_node):
        with settings(host_string=del_rabbitmq_node, warn_only=True):
            sudo("rabbitmqctl stop_app")
            sudo("rabbitmqctl reset")
            sudo("service supervisor-support-service stop")
            sudo("mv /var/lib/rabbitmq/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie.removed")
            sudo("mv /etc/rabbitmq/rabbitmq.config /etc/rabbitmq/rabbitmq.config.removed")
    else:
        # If the node is not reachable, then delete the node remotely from one
        # of the nodes in the cluster.
        with settings(host_string=env.roledefs['rabbit'][0], warn_only=True):
            hostname = local("getent hosts %s | awk '{print $3}'" % del_rabbitmq_ctrl_ip, capture=True)
            sudo("rabbitmqctl forget_cluster_node rabbit@%s" % hostname)

    # Give the other nodes some time to re-adjust the cluster.
    time.sleep(30)

    execute(config_rabbitmq)
    for host_string in env.roledefs[role]:
        with settings(host_string=host_string):
            sudo("service rabbitmq-server restart")
            # Give time for RabbitMQ to recluster
            time.sleep(30)

    result = execute(verify_cluster_status)
    if False in result.values():
        print "Unable to recluster RabbitMQ cluster after removing the node %s" % del_rabbitmq_node
        exit(1)
Example #30
def purge_node_from_rabbitmq_cluster(del_rabbitmq_node, role):

    if get_from_testbed_dict("openstack", "manage_amqp", "no") == "no" and role == "openstack":
        # We are not managing the RabbitMQ server. No-op.
        return

    env.roledefs["rabbit"] = env.roledefs[role]
    del_rabbitmq_ip = hstr_to_ip(del_rabbitmq_node)
    del_rabbitmq_ctrl_ip = hstr_to_ip(get_control_host_string(del_rabbitmq_node))
    if ping_test(del_rabbitmq_node):
        with settings(host_string=del_rabbitmq_node, warn_only=True):
            sudo("rabbitmqctl stop_app")
            sudo("rabbitmqctl reset")
            sudo("service supervisor-support-service stop")
            sudo("mv /var/lib/rabbitmq/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie.removed")
            sudo("mv /etc/rabbitmq/rabbitmq.config /etc/rabbitmq/rabbitmq.config.removed")
    else:
        # If the node is not reachable, then delete the node remotely from one
        # of the nodes in the cluster.
        with settings(host_string=env.roledefs["rabbit"][0], warn_only=True):
            hostname = local("getent hosts %s | awk '{print $3'}" % del_rabbitmq_ctrl_ip, capture=True)
            sudo("rabbitmqctl forget_cluster_node rabbit@%s" % hostname)

    # Give the other nodes some time to re-adjust the cluster.
    time.sleep(30)

    execute(config_rabbitmq)
    for host_string in env.roledefs[role]:
        with settings(host_string=host_string):
            sudo("service rabbitmq-server restart")
            # Give time for RabbitMQ to recluster
            time.sleep(30)

    result = execute(verify_cluster_status)
    if False in result.values():
        print "Unable to recluster RabbitMQ cluster after removing the node %s" % del_rabbitmq_node
        exit(1)
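Examples #24, #29 and #30 shell out to getent plus awk to map the removed node's control IP back to a name for rabbitmqctl forget_cluster_node. When the IP resolves on the local machine, the Python standard library can do the same lookup without a subprocess; a sketch:

import socket

# Sketch: reverse-resolve the control IP locally. gethostbyaddr returns
# (hostname, aliaslist, ipaddrlist); assumes the IP is in DNS or /etc/hosts.
hostname = socket.gethostbyaddr(del_rabbitmq_ctrl_ip)[0].split('.')[0]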
Example #31
def setup_rabbitmq_cluster(force=False):
    """Task to cluster the rabbit servers."""
    if len(env.roledefs['cfgm']) <= 1:
        print "Single cfgm cluster, skipping rabbitmq cluster setup."
        return

    if not force:
        with settings(warn_only=True):
            result = execute(verify_cluster_status)
        if result and False not in result.values():
            print "RabbitMQ cluster is up and running; No need to cluster again."
            return

    rabbitmq_cluster_uuid = getattr(testbed, 'rabbitmq_cluster_uuid', None)
    if not rabbitmq_cluster_uuid:
        rabbitmq_cluster_uuid = uuid.uuid4()

    execute(listen_at_supervisor_config_port)
    execute(remove_mnesia_database)
    execute(verify_cfgm_hostname)
    execute(allow_rabbitmq_port)
    execute(config_rabbitmq)
    execute("stop_rabbitmq_and_set_cookie", rabbitmq_cluster_uuid)
    execute(start_rabbitmq)
    #execute(rabbitmqctl_stop_app)
    #execute(rabbitmqctl_reset)
    #execute("rabbitmqctl_start_app_node", env.roledefs['cfgm'][0])
    #execute(add_cfgm_to_rabbitmq_cluster)
    #execute(rabbitmqctl_start_app)
    if get_from_testbed_dict('ha', 'internal_vip', None):
        execute('set_ha_policy_in_rabbitmq')
        execute('set_tcp_keepalive')
    result = execute(verify_cluster_status)
    if False in result.values():
        print "Unable to setup RabbitMQ cluster...."
        exit(1)
Example #32
def get_vcenter_port():
    return get_from_testbed_dict('vcenter', 'port', '443')
Example #33
def get_vcenter_ip():
    return get_from_testbed_dict('vcenter', 'server', None)
Example #34
def get_vcenter_admin_password():
    return get_from_testbed_dict('vcenter', 'password', 'Contrail123!')
Example #35
def get_vcenter_admin_user():
    return get_from_testbed_dict('vcenter', 'username', '*****@*****.**')
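Examples #32 through #35 are thin accessors over a single 'vcenter' stanza in testbed.py. Pulling them together, the stanza they expect would look roughly like this (server and username are placeholders; only the port and password defaults appear in the examples above):

# Hypothetical testbed.py 'vcenter' stanza; values are illustrative.
vcenter = {
    'server':   '10.1.1.100',      # read by get_vcenter_ip()
    'port':     '443',             # read by get_vcenter_port()
    'username': 'administrator@vsphere.local',  # read by get_vcenter_admin_user()
    'password': 'Contrail123!',    # read by get_vcenter_admin_password()
}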