Example #1
0
def verify_collector():
    if not is_xenial_or_above():
        verify_service("supervisor-analytics")
        # TODO: verify once contrail-collector works on Ubuntu 16.04
        verify_service("contrail-collector")
    verify_service("contrail-analytics-api")
    verify_service("contrail-query-engine")
Example #2
0
def verify_openstack():
    openstack_services = get_openstack_services()
    if not is_xenial_or_above():
        verify_service(openstack_services["keystone"])
    insecure_flag = ''
    if keystone_ssl_enabled() and get_keystone_insecure_flag():
        insecure_flag = '--insecure'
    for x in xrange(10):
        with settings(warn_only=True):
            if is_xenial_or_above():
                output = sudo("source /etc/contrail/openstackrc; openstack %s project list" % insecure_flag)
            else:
                output = sudo("source /etc/contrail/openstackrc; keystone %s tenant-list" % insecure_flag)
        if output.failed:
            sleep(10)
        else:
            return
    raise OpenStackSetupError(output)
def setup_rabbitmq_cluster(force=False):
    """Task to cluster the rabbit servers."""
    amqp_roles = []
    rabbit_servers = get_from_testbed_dict('cfgm', 'amqp_hosts', None)
    if rabbit_servers:
        print "Using external rabbitmq servers %s" % rabbit_servers
    else:
        # Provision rabbitmq cluster in cfgm role nodes.
        print "Provisioning rabbitq in cfgm nodes"
        amqp_roles = ['cfgm']

    # Provision rabbitmq cluster in openstack on request
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        # Provision rabbitmq cluster in openstack role nodes as well.
        amqp_roles.append('openstack')

    for role in amqp_roles:
        env.roledefs['rabbit'] = env.roledefs[role]

        if not force:
            with settings(warn_only=True):
                result = execute("verify_cluster_status", retry='no')
            if result and False not in result.values():
                print "RabbitMQ cluster is up and running in role[%s]; No need to cluster again." % role
                continue

        rabbitmq_cluster_uuid = getattr(testbed, 'rabbitmq_cluster_uuid', None)
        if not rabbitmq_cluster_uuid:
            rabbitmq_cluster_uuid = uuid.uuid4()

        if not is_xenial_or_above():
            execute(listen_at_supervisor_support_port)
        execute(remove_mnesia_database)
        execute(verify_rabbit_node_hostname)
        execute(allow_rabbitmq_port)
        execute(rabbitmq_env)
        execute(config_rabbitmq)
        execute("stop_rabbitmq_and_set_cookie", rabbitmq_cluster_uuid)
        execute(start_rabbitmq)
        # Sleep to work around RabbitMQ bug 26370 and prevent
        # "rabbitmqctl cluster_status" from breaking the database;
        # this has been seen in CI.
        time.sleep(60)
        #execute(rabbitmqctl_stop_app)
        #execute(rabbitmqctl_reset)
        #execute("rabbitmqctl_start_app_node", env.roledefs['rabbit'][0])
        #execute(add_node_to_rabbitmq_cluster)
        #execute(rabbitmqctl_start_app)
        if (role == 'openstack' and get_openstack_internal_vip() or
            role == 'cfgm' and get_contrail_internal_vip()):
            execute('set_ha_policy_in_rabbitmq')
            execute('set_tcp_keepalive')
            execute('set_tcp_keepalive_on_compute')
        result = execute(verify_cluster_status)
        if False in result.values():
            print "Unable to setup RabbitMQ cluster in role[%s]...." % role
            exit(1)
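setup_rabbitmq_cluster() above (and join_rabbitmq_cluster() in Example #12) reads deployment options through get_from_testbed_dict(), which the testbed.py-driven fab environment provides but which is not reproduced in this corpus. A minimal sketch, assuming the options are loaded into Fabric's env dictionary (an assumption; the real helper may differ):
from fabric.api import env


def get_from_testbed_dict(section, key, default_value):
    # Return env[section][key] from the loaded testbed definition, falling back
    # to the supplied default when the section or key is absent.
    try:
        return env[section][key]
    except KeyError:
        return default_value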
Example #4
0
def verify_cfgm():
    verify_service("zookeeper")
    if manage_config_db():
        verify_service("contrail-database", check_return_code=True)
    if not is_xenial_or_above():
        verify_service("supervisor-config")
    verify_service("contrail-api")
    verify_service("contrail-schema")
    if get_orchestrator() == 'openstack':
        verify_service("contrail-svc-monitor")
Example #5
0
def get_vcenter_compute_pkgs():
    pkgs = ['nova-compute', 'nova-compute-kvm',
            'python-novaclient', 'python-bitstring',
            'contrail-utils']
    if is_xenial_or_above():
        pkgs += ['openjdk-8-jre-headless'] 
    else:
        pkgs += ['openjdk-7-jre-headless']

    return pkgs
def get_vcenter_compute_pkgs():
    pkgs = [
        'nova-compute', 'nova-compute-kvm', 'python-novaclient',
        'python-bitstring', 'contrail-utils'
    ]
    if is_xenial_or_above():
        pkgs += ['openjdk-8-jre-headless']
    else:
        pkgs += ['openjdk-7-jre-headless']

    return pkgs
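A usage sketch for the package list above, assuming a Fabric context targeting the vCenter compute host (hypothetical task, not taken from the original code):
from fabric.api import sudo, task


@task
def install_vcenter_compute_pkgs():
    # Install the nova/contrail packages plus the JRE matching the Ubuntu release.
    pkgs = get_vcenter_compute_pkgs()
    sudo('DEBIAN_FRONTEND=noninteractive apt-get -y install %s' % ' '.join(pkgs))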
Example #7
0
def verify_cfgm():
    verify_service("zookeeper")
    if manage_config_db():
        verify_service("contrail-database", initd_service=True)
    if not is_xenial_or_above():
        verify_service("supervisor-config")
    verify_service("contrail-api")
    verify_service("contrail-discovery")
    verify_service("contrail-schema")
    if get_orchestrator() == "openstack":
        verify_service("contrail-svc-monitor")
def stop_collector_node(*args):
    for host_string in args:
        with settings(host_string=host_string, warn_only=True):
            if is_xenial_or_above():
                for svc in [
                        'contrail-analytics-api', 'contrail-alarm-gen',
                        'contrail-analytics-nodemgr', 'contrail-collector',
                        'contrail-topology', 'contrail-snmp-collector'
                ]:
                    sudo('service %s stop' % svc)
            else:
                sudo('service supervisor-analytics stop')
def stop_collector_node(*args):
    for host_string in args:
        with settings(host_string=host_string, warn_only=True):
            if is_xenial_or_above():
                for svc in ['contrail-analytics-api',
                            'contrail-alarm-gen',
                            'contrail-analytics-nodemgr',
                            'contrail-collector',
                            'contrail-topology',
                            'contrail-snmp-collector']:
                    sudo('service %s stop' % svc)
            else:
                sudo('service supervisor-analytics stop')
Example #10
0
    def connect_to_vcenter(self):
        from pyVim import connect

        if is_xenial_or_above():
            ssl = __import__("ssl")
            context = ssl._create_unverified_context()
            self.service_instance = connect.SmartConnect(host=self.vcenter_server,
                                            user=self.vcenter_username,
                                            pwd=self.vcenter_password,
                                            port=443, sslContext=context)
        else:
            self.service_instance = connect.SmartConnect(host=self.vcenter_server,
                                            user=self.vcenter_username,
                                            pwd=self.vcenter_password,
                                            port=443)

        self.content = self.service_instance.RetrieveContent()
        atexit.register(connect.Disconnect, self.service_instance)
    def connect_to_vcenter(self):
        from pyVim import connect

        if is_xenial_or_above():
            ssl = __import__("ssl")
            context = ssl._create_unverified_context()
            self.service_instance = connect.SmartConnect(host=self.vcenter_server,
                                            user=self.vcenter_username,
                                            pwd=self.vcenter_password,
                                            port=443, sslContext=context)
        else:
            self.service_instance = connect.SmartConnect(host=self.vcenter_server,
                                            user=self.vcenter_username,
                                            pwd=self.vcenter_password,
                                            port=443)

        self.content = self.service_instance.RetrieveContent()
        atexit.register(connect.Disconnect, self.service_instance)
Example #12
0
def join_rabbitmq_cluster(new_ctrl_host):
    """ Task to join a new rabbit server into an existing cluster """
    # Provision rabbitmq cluster in cfgm role nodes.
    amqp_roles = ['cfgm']
    if get_from_testbed_dict('openstack', 'manage_amqp', 'no') == 'yes':
        # Provision rabbitmq cluster in openstack role nodes as well.
        amqp_roles.append('openstack')
    for role in amqp_roles:
        env.roledefs['rabbit'] = env.roledefs[role]

        # copy the erlang cookie from one of the other nodes.
        rabbitmq_cluster_uuid = None
        for host_string in env.roledefs['rabbit']:
            with settings(host_string=host_string, warn_only=True):
                if host_string != new_ctrl_host and\
                   sudo('ls /var/lib/rabbitmq/.erlang.cookie').succeeded:
                    rabbitmq_cluster_uuid = \
                        sudo('cat /var/lib/rabbitmq/.erlang.cookie')
                    break
        if rabbitmq_cluster_uuid is None:
            raise RuntimeError("Not able to get the Erlang cookie from the cluster nodes")

        if not is_xenial_or_above():
            execute(listen_at_supervisor_support_port_node, new_ctrl_host)
        execute(remove_mnesia_database_node, new_ctrl_host)
        execute(verify_rabbit_node_hostname)
        execute(allow_rabbitmq_port_node, new_ctrl_host)
        execute(rabbitmq_env)
        execute(config_rabbitmq)
        execute('stop_rabbitmq_and_set_cookie_node', rabbitmq_cluster_uuid, new_ctrl_host)
        execute('start_rabbitmq_node', new_ctrl_host)
        # Sleep to work around RabbitMQ bug 26370 and prevent
        # "rabbitmqctl cluster_status" from breaking the database;
        # this has been seen in CI.
        time.sleep(30)
        if (role == 'openstack' and get_openstack_internal_vip() or
            role == 'cfgm' and get_contrail_internal_vip()):
            execute('set_ha_policy_in_rabbitmq')
            execute('set_tcp_keepalive')

        result = execute(verify_cluster_status)
        if False in result.values():
            print "Unable to setup RabbitMQ cluster in role[%s]...." % role
            exit(1)
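Both RabbitMQ clustering tasks above gate on execute(verify_cluster_status), whose definition is not part of this corpus. A minimal sketch, assuming it is a Fabric task run on every node in the 'rabbit' role so that execute() returns a host-to-boolean dict (a hypothetical implementation, shown only to make the "False in result.values()" checks concrete):
from fabric.api import roles, settings, sudo, task


@task
@roles('rabbit')
def verify_cluster_status(retry='yes'):
    # True when rabbitmqctl reports a reachable, healthy cluster from this node.
    with settings(warn_only=True):
        status = sudo("rabbitmqctl cluster_status")
    return status.succeeded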
Example #13
0
def verify_service(service, check_return_code=False):
    for x in xrange(10):
        with settings(warn_only=True):
            if is_xenial_or_above():
                # "systemctl is-active" prints "active"/"inactive" rather than
                # "running", so force the exit-code check on Xenial and above.
                output = sudo("systemctl is-active %s" % service)
                check_return_code = True
            else:
                output = sudo("service %s status" % service)
        if check_return_code:
            if output.succeeded or re.search('Active:.*active', output):
                return
            else:
                sleep(20)
        else:
            if 'running' in output.lower():
                return
            else:
                sleep(20)
    raise SystemExit("Service %s not running." % service)
Example #14
0
def verify_database():
    if not is_xenial_or_above():
        verify_service("supervisor-database")
        verify_service("contrail-database", initd_service=False)
Example #15
0
def verify_control():
    if not is_xenial_or_above():
        verify_service("supervisor-control")
    verify_service("contrail-control")
Example #16
0
def verify_compute():
    if not is_xenial_or_above():
        verify_service("supervisor-vrouter")
Example #17
0
def verify_webui():
    if not is_xenial_or_above():
        verify_service("supervisor-webui")
Example #18
0
def verify_database():
    if not is_xenial_or_above():
        verify_service("supervisor-database")
    verify_service("contrail-database", check_return_code=True)