Esempio n. 1
0
 def set_rest_endpoint(self):
     """Resolve and cache the REST endpoint and the https base URL."""
     if orch_env == ENV_K8S_SINGLE_NODE:
         # single-node kubernetes: talk directly to the voltha pod
         endpoint = get_pod_ip('voltha') + ':8443'
     else:
         # otherwise the envoy endpoint is registered in consul
         endpoint = get_endpoint_from_consul(
             LOCAL_CONSUL, 'voltha-envoy-8443')
     self.rest_endpoint = endpoint
     self.base_url = 'https://' + endpoint
Esempio n. 2
0
    def __init__(self, consul_endpoint, grafana_url, topic="voltha.heartbeat"):
        """Initialize dashboard state and resolve the kafka endpoint.

        The kafka endpoint is looked up in consul, retrying up to 10 times
        with a 10 second pause between attempts; only when all retries are
        exhausted is the component stopped.
        """
        self.dash_meta = {}
        self.timer_resolution = 10
        self.timer_duration = 600
        self.topic = topic
        self.dash_template = DashTemplate(grafana_url)
        self.grafana_url = grafana_url
        self.kafka_endpoint = None
        self.consul_endpoint = consul_endpoint
        retries = 10
        while True:
            try:
                self.kafka_endpoint = get_endpoint_from_consul(
                    self.consul_endpoint, 'kafka')
                break
            except Exception:
                # Narrowed from a bare except.  The original called
                # self.stop() here on *every* failure, which defeated the
                # retry loop below.
                log.error("unable-to-communicate-with-consul")
            retries -= 1
            if retries == 0:
                # Retries exhausted: give up and stop.  The original never
                # left the loop here ('retrys' went negative forever).
                log.error("unable-to-communicate-with-consul")
                self.stop()
                break
            time.sleep(10)
        self.on_start_callback = None

        self._client = KafkaClient(self.kafka_endpoint)
        self._consumer_list = []  # List of consumers
        # List of deferred returned from consumers' start() methods
        self._consumer_d_list = []
Esempio n. 3
0
    def _get_kafka_producer(self):
        """Create self.kproducer for the configured kafka endpoint.

        An endpoint of the form '@<service>' is resolved through consul
        first.  On any failure the producer (and client) are left as None
        and the error is logged.
        """
        try:
            if self.kafka_endpoint.startswith('@'):
                try:
                    _k_endpoint = get_endpoint_from_consul(
                        self.consul_endpoint, self.kafka_endpoint[1:])
                    log.debug('found-kafka-service', endpoint=_k_endpoint)
                except Exception as e:
                    log.exception('no-kafka-service-in-consul', e=e)
                    self.kproducer = None
                    self.kclient = None
                    return
            else:
                _k_endpoint = self.kafka_endpoint
            self.kproducer = _kafkaProducer({
                'bootstrap.servers': _k_endpoint,
            })
        except Exception as e:
            # 'except Exception, e' is python-2-only syntax; 'as' works on
            # python 2.6+ and 3.  A redundant trailing 'pass' was removed.
            log.exception('failed-get-kafka-producer', e=e)
            return
Esempio n. 4
0
class TestGlobalNegativeCases(RestBase):

    # Resolve the REST entry point through the local consul agent
    rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'envoy-8443')

    # Base URL shared by every request in this suite
    base_url = 'https://' + rest_endpoint

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~ NEGATIVE TEST CASES ~~~~~~~~~~~~~~~~~~~~~~~~~~

    def test_03_negative_behavior(self):
        self._invalid_url()
        self._instance_not_found()
        self._logical_device_not_found()
        self._device_not_found()

    def _invalid_url(self):
        # A path outside the API surface yields a plain HTTP 404
        self.get('/some_invalid_url', expected_http_code=404)

    def _assert_not_found(self, path):
        # Unknown ids come back as HTTP 200 carrying grpc status 5 (NOT_FOUND)
        self.get(path, expected_http_code=200, grpc_status=5)

    def _instance_not_found(self):
        self._assert_not_found('/api/v1/instances/nay')

    def _logical_device_not_found(self):
        self._assert_not_found('/api/v1/logical_devices/nay')

    def _device_not_found(self):
        self._assert_not_found('/api/v1/devices/nay')
Esempio n. 5
0
    def _get_kafka_producer(self):
        """Create self.kclient and self.kproducer for the configured endpoint.

        PRODUCER_ACK_LOCAL_WRITE: the broker replies once the data is
        written to its local log.  An endpoint of the form '@<service>' is
        resolved through consul first; on failure both attributes are reset
        to None and the error is logged.
        """
        try:
            if self.kafka_endpoint.startswith('@'):
                try:
                    _k_endpoint = get_endpoint_from_consul(self.consul_endpoint,
                                                           self.kafka_endpoint[1:])
                    log.debug('found-kafka-service', endpoint=_k_endpoint)
                except Exception as e:
                    log.exception('no-kafka-service-in-consul', e=e)
                    self.kproducer = None
                    self.kclient = None
                    return
            else:
                _k_endpoint = self.kafka_endpoint

            self.kclient = _KafkaClient(_k_endpoint)
            self.kproducer = _kafkaProducer(self.kclient,
                                            req_acks=PRODUCER_ACK_LOCAL_WRITE,
                                            ack_timeout=self.ack_timeout,
                                            max_req_attempts=self.max_req_attempts)
        except Exception as e:
            # fixed python-2-only 'except Exception, e' syntax
            log.exception('failed-get-kafka-producer', e=e)
            return
Esempio n. 6
0
    def __init__(self, consul_endpoint, topic="voltha.heartbeat", runtime=60):
        """Look up the kafka broker in consul and prepare consumer tracking."""
        self.topic = topic
        self.runtime = runtime
        self.kafka_endpoint = get_endpoint_from_consul(consul_endpoint, 'kafka')

        self._client = KafkaClient(self.kafka_endpoint)
        # Consumers created so far
        self._consumer_list = []
        # Deferreds returned by each consumer's start() method
        self._consumer_d_list = []
Esempio n. 7
0
    def __init__(self, consul_endpoint, topic="voltha.heartbeat", runtime=60):
        """Resolve the kafka endpoint via consul and set up consumer state."""
        self.topic = topic
        self.runtime = runtime
        self.kafka_endpoint = get_endpoint_from_consul(consul_endpoint, 'kafka')

        self._client = KafkaClient(self.kafka_endpoint)
        self._consumer_list = []    # consumers created so far
        self._consumer_d_list = []  # deferreds from the consumers' start()
Esempio n. 8
0
 def resolve_endpoint(self, endpoint):
     """Resolve ``endpoint`` to a ``(host, port)`` tuple.

     An endpoint starting with '@' names a consul service and is resolved
     through consul; anything else is assumed to already be 'host:port'.
     Returns ``(None, None)`` on any failure so callers can always unpack
     the result as a 2-tuple.
     """
     ip_port_endpoint = endpoint
     if endpoint.startswith('@'):
         try:
             ip_port_endpoint = get_endpoint_from_consul(
                 self.consul_endpoint, endpoint[1:])
             log.info('endpoint-found',
                      endpoint=endpoint, ip_port=ip_port_endpoint)
         except Exception as e:
             log.error('service-not-found-in-consul', endpoint=endpoint,
                       exception=repr(e))
             return None, None
     if ip_port_endpoint:
         host, port = ip_port_endpoint.split(':', 2)
         return host, int(port)
     # Explicit failure value instead of an implicit None so the caller's
     # tuple unpacking never blows up.
     return None, None
Esempio n. 9
0
 def resolve_endpoint(self, endpoint):
     """Resolve ``endpoint`` to a ``(host, port)`` tuple.

     Endpoints of the form '@<service>' are resolved through consul;
     others are assumed to be 'host:port' already.  Always returns a
     2-tuple, ``(None, None)`` on failure.
     """
     ip_port_endpoint = endpoint
     if endpoint.startswith('@'):
         try:
             ip_port_endpoint = get_endpoint_from_consul(
                 self.consul_endpoint, endpoint[1:])
             log.info('endpoint-found',
                      endpoint=endpoint, ip_port=ip_port_endpoint)
         except Exception as e:
             log.error('service-not-found-in-consul', endpoint=endpoint,
                       exception=repr(e))
             return None, None
     if ip_port_endpoint:
         host, port = ip_port_endpoint.split(':', 2)
         return host, int(port)
     # Explicit fallback: the original fell off the end and returned a bare
     # None, which broke callers unpacking two values.
     return None, None
Esempio n. 10
0
 def resolve_endpoint(self, endpoint):
     """Resolve ``endpoint`` to a ``(host, port)`` tuple.

     '@<service>' endpoints are resolved through consul; a failed lookup
     sends SIGTERM to this process so docker restarts the agent.  Always
     returns a 2-tuple, ``(None, None)`` on failure.
     """
     ip_port_endpoint = endpoint
     if endpoint.startswith('@'):
         try:
             ip_port_endpoint = get_endpoint_from_consul(
                 self.consul_endpoint, endpoint[1:])
             log.info(
                 '{}-service-endpoint-found'.format(endpoint), address=ip_port_endpoint)
         except Exception as e:
             log.error('{}-service-endpoint-not-found'.format(endpoint), exception=repr(e))
             log.error('committing-suicide')
             # Committing suicide in order to let docker restart ofagent
             os.system("kill -15 {}".format(os.getpid()))
             # Return explicitly: the original fell through and crashed
             # unpacking the unresolved '@...' string before the signal
             # was delivered.
             return None, None
     if ip_port_endpoint:
         host, port = ip_port_endpoint.split(':', 2)
         return host, int(port)
     return None, None
Esempio n. 11
0
 def resolve_endpoint(self, endpoint):
     """Resolve ``endpoint`` to a ``(host, port)`` tuple.

     '@<service>' endpoints are looked up in consul; on lookup failure the
     process SIGTERMs itself so docker restarts the agent.  Always returns
     a 2-tuple, ``(None, None)`` on failure.
     """
     ip_port_endpoint = endpoint
     if endpoint.startswith('@'):
         try:
             ip_port_endpoint = get_endpoint_from_consul(
                 self.consul_endpoint, endpoint[1:])
             log.info(
                 '{}-service-endpoint-found'.format(endpoint), address=ip_port_endpoint)
         except Exception as e:
             log.error('{}-service-endpoint-not-found'.format(endpoint), exception=repr(e))
             log.error('committing-suicide')
             # Committing suicide in order to let docker restart ofagent
             os.system("kill -15 {}".format(os.getpid()))
             # Explicit return: the original fell through and crashed
             # unpacking the unresolved '@...' string.
             return None, None
     if ip_port_endpoint:
         host, port = ip_port_endpoint.split(':', 2)
         return host, int(port)
     return None, None
Esempio n. 12
0
 def resolve_endpoint(self, endpoint):
     """Resolve ``endpoint`` to a ``(host, port)`` tuple.

     '@<service>' endpoints are looked up in consul; on lookup failure the
     process SIGTERMs itself so docker restarts the agent.  Always returns
     a 2-tuple, ``(None, None)`` on failure.
     """
     ip_port_endpoint = endpoint
     if endpoint.startswith('@'):
         try:
             ip_port_endpoint = get_endpoint_from_consul(
                 self.consul_endpoint, endpoint[1:])
             log.info('Found endpoint {} service at {}'.format(
                 endpoint, ip_port_endpoint))
         except Exception as e:
             log.error('Failure to locate {} service from '
                       'consul {}:'.format(endpoint, repr(e)))
             log.error('Committing suicide...')
             # Committing suicide in order to let docker restart ofagent
             os.system("kill -15 {}".format(os.getpid()))
             # Explicit return: the original fell through and crashed
             # unpacking the unresolved '@...' string.
             return None, None
     if ip_port_endpoint:
         host, port = ip_port_endpoint.split(':', 2)
         return host, int(port)
     return None, None
Esempio n. 13
0
    def __init__(self, consul_endpoint, grafana_url, topic="voltha.heartbeat"):
        """Initialize dashboard state and locate the kafka broker via consul."""
        self.dash_meta = {}
        self.timer_resolution = 10
        self.timer_duration = 600
        self.topic = topic
        self.dash_template = DashTemplate(grafana_url)
        self.grafana_url = grafana_url
        self.kafka_endpoint = get_endpoint_from_consul(consul_endpoint, 'kafka')
        self.on_start_callback = None

        self._client = KafkaClient(self.kafka_endpoint)
        # Consumers created so far
        self._consumer_list = []
        # Deferreds returned by each consumer's start() method
        self._consumer_d_list = []
Esempio n. 14
0
    def __init__(self, consul_endpoint, kafka_endpoint, grafana_url, topic="voltha.heartbeat"):
        """Initialize dashboard state and resolve the kafka endpoint.

        A kafka_endpoint of the form '@<service>' is resolved through
        consul, retrying up to 10 times with a 10 second pause between
        attempts; only when all retries are exhausted is the component
        stopped.
        """
        self.dash_meta = {}
        self.timer_resolution = 10
        self.timer_duration = 600
        self.topic = topic
        self.dash_template = DashTemplate(grafana_url)
        self.grafana_url = grafana_url
        self.kafka_endpoint = kafka_endpoint
        self.consul_endpoint = consul_endpoint

        if kafka_endpoint.startswith('@'):
            retries = 10
            while True:
                try:
                    self.kafka_endpoint = get_endpoint_from_consul(
                        self.consul_endpoint, kafka_endpoint[1:])
                    break
                except Exception:
                    # Narrowed from a bare except.  The original called
                    # self.stop() here on every failure, defeating the
                    # retry loop below.
                    log.error("unable-to-communicate-with-consul")
                retries -= 1
                if retries == 0:
                    # Retries exhausted: give up and stop.  The original
                    # never left the loop here ('retrys' went negative).
                    log.error("unable-to-communicate-with-consul")
                    self.stop()
                    break
                time.sleep(10)

        self.on_start_callback = None

        self._client = KafkaClient(self.kafka_endpoint)
        self._consumer_list = []  # List of consumers
        # List of deferred returned from consumers' start() methods
        self._consumer_d_list = []
Esempio n. 15
0
    def _get_kafka_producer(self):
        """Create self.kclient and self.kproducer for the configured endpoint.

        PRODUCER_ACK_LOCAL_WRITE: the broker replies once the data is
        written to its local log.  '@<service>' endpoints are resolved via
        consul; on a failed lookup both attributes are reset to None.
        """
        _k_endpoint = self.kafka_endpoint
        if _k_endpoint.startswith('@'):
            # '@name' means: resolve the service called <name> via consul
            try:
                _k_endpoint = get_endpoint_from_consul(self.consul_endpoint,
                                                       self.kafka_endpoint[1:])
                log.debug('found-kafka-service', endpoint=_k_endpoint)
            except Exception as e:
                log.exception('no-kafka-service-in-consul', e=e)
                self.kproducer = None
                self.kclient = None
                return

        self.kclient = _KafkaClient(_k_endpoint)
        self.kproducer = _kafkaProducer(self.kclient,
                                        req_acks=PRODUCER_ACK_LOCAL_WRITE,
                                        ack_timeout=self.ack_timeout,
                                        max_req_attempts=self.max_req_attempts)
Esempio n. 16
0
    def test_07_start_all_containers(self):
        print "Test_07_start_all_containers_Start:------------------ "
        t0 = time.time()

        try:
            # Pre-test - clean up all running docker containers
            print "Pre-test: Removing all running containers ..."
            cmd = command_defs['docker_compose_stop']
            _, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)
            cmd = command_defs['docker_compose_rm_f']
            _, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)

            # get a list of services in the docker-compose file
            print "Getting list of services in docker compose file ..."
            cmd = command_defs['docker_compose_services']
            services, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)
            docker_service_list = services.split()
            self.assertGreaterEqual(len(docker_service_list),
                                    DOCKER_COMPOSE_FILE_SERVICES_COUNT)

            # start all the containers
            print "Starting all containers ..."
            cmd = command_defs['docker_compose_start_all']
            out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)

            # Instead of using only a fixed timeout:
            #   1) wait until the services are ready (polling per second)
            #   2) bail out after a longer timeout.
            print "Waiting for all containers to be ready ..."
            self.wait_till('Not all services are up',
                           self._is_voltha_ensemble_ready,
                           interval=1,
                           timeout=30)

            # verify that all containers are running
            print "Verify all services are running using docker command ..."
            for service in docker_service_list:
                cmd = command_defs['docker_compose_ps'] + ' {} | wc -l'.format(
                    service)
                out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
                self.assertEqual(rc, 0)
                self.assertGreaterEqual(out, 3)  # 2 are for headers

            # Verify that 'docker ps' return the same number of running process
            cmd = command_defs['docker_ps_count']
            out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)
            self.assertGreaterEqual(out.split(), [str(len(
                docker_service_list))])

            # Retrieve the list of services from consul and validate against
            # the list obtained from docker composed
            print "Verify all services are registered in consul ..."
            expected_services = ['consul-rest', 'fluentd-intake',
                                 'voltha-grpc',
                                 'voltha-health',
                                 'consul-8600', 'zookeeper', 'consul',
                                 'kafka']

            cmd = command_defs['consul_get_services']
            out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)
            try:
                consul_services = json.loads(out)
                intersected_services = [s for s in expected_services if
                                        s in consul_services]
                self.assertEqual(len(intersected_services),
                                 len(expected_services))
                # services_match = 0
                # for d_service in docker_service_list:
                #     for c_service in consul_services:
                #         if c_service.find(d_service) != -1:
                #             services_match += 1
                #             print d_service, c_service
                #             break
                # self.assertEqual(services_match, len(docker_service_list))
            except Exception as e:
                self.assertRaises(e)

            # Verify the service record of the voltha service
            print "Verify the service record of voltha in consul ..."
            expected_srv_elements = ['ModifyIndex', 'CreateIndex',
                                     'ServiceEnableTagOverride', 'Node',
                                     'Address', 'TaggedAddresses', 'ServiceID',
                                     'ServiceName', 'ServiceTags',
                                     'ServiceAddress', 'ServicePort']
            cmd = command_defs['consul_get_srv_voltha_health']
            out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)
            try:
                srv = json.loads(out)
                intersect_elems = [e for e in srv[0] if
                                   e in expected_srv_elements]
                self.assertEqual(len(expected_srv_elements),
                                 len(intersect_elems))
            except Exception as e:
                self.assertRaises(e)

            # Verify kafka client is receiving the messages
            print "Verify kafka client has heartbeat topic ..."
            expected_pattern = ['voltha.heartbeat']
            kafka_endpoint = get_endpoint_from_consul(LOCAL_CONSUL,'kafka')
            cmd = command_defs['kafka_client_run'].format(kafka_endpoint)
            kafka_client_output = run_long_running_command_with_timeout(cmd, 20)

            # Verify the kafka client output
            # instance id
            found = False
            for out in kafka_client_output:
                if all(ep for ep in expected_pattern if ep in out):
                    found = True
                    break
            self.assertTrue(found)

            # Commented the heartbeat messages from voltha as on Jenkins this
            # test fails more often than not.   On local or cluster environment
            # the kafka event bus works well.

            # verify docker-compose logs are being produced - just get the
            # first work of each line
            print "Verify docker compose logs has output from all the services " \
                  "..."
            expected_output = ['voltha_1', 'fluentd_1', 'consul_1',
                               'registrator_1', 'kafka_1', 'zookeeper_1',
                               'ofagent_1', 'netconf_1']
            cmd = command_defs['docker_compose_logs']
            docker_compose_logs = run_long_running_command_with_timeout(cmd, 5, 0)
            intersected_logs = [l for l in expected_output if
                                l in docker_compose_logs]
            self.assertEqual(len(intersected_logs), len(expected_output))

            # verify docker voltha logs are being produced - we will just verify
            # some
            # key messages in the logs
            print "Verify docker voltha logs are produced ..."
            expected_output = ['coordinator._renew_session', 'main.heartbeat']
            cmd = command_defs['docker_voltha_logs']
            docker_voltha_logs = run_long_running_command_with_timeout(cmd,
                                                                       0.5, 5)
            intersected_logs = [l for l in expected_output if
                                l in docker_voltha_logs]
            self.assertEqual(len(intersected_logs), len(expected_output))

        finally:
            print "Stopping all containers ..."
            # clean up all created containers for this test
            #self._stop_and_remove_all_containers()
            cmd = command_defs['docker_compose_down']
            _, err, rc = run_command_to_completion_with_raw_stdout(cmd)

            print "Test_07_start_all_containers_End:------------------ took {}" \
                  " secs \n\n".format(time.time() - t0)
Esempio n. 17
0
    def test_07_start_all_containers(self):
        print "Test_07_start_all_containers_Start:------------------ "
        t0 = time.time()

        try:
            # Pre-test - clean up all running docker containers
            print "Pre-test: Removing all running containers ..."
            cmd = command_defs['docker_compose_stop']
            _, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)
            cmd = command_defs['docker_compose_rm_f']
            _, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)

            # get a list of services in the docker-compose file
            print "Getting list of services in docker compose file ..."
            cmd = command_defs['docker_compose_services']
            services, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)
            docker_service_list = services.split()
            self.assertGreaterEqual(len(docker_service_list),
                                    DOCKER_COMPOSE_FILE_SERVICES_COUNT)

            # start all the containers
            print "Starting all containers ..."
            cmd = command_defs['docker_compose_start_all']
            out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)

            # Instead of using only a fixed timeout:
            #   1) wait until the services are ready (polling per second)
            #   2) bail out after a longer timeout.
            print "Waiting for all containers to be ready ..."
            self.wait_till('Not all services are up',
                           self._is_voltha_ensemble_ready,
                           interval=1,
                           timeout=30)

            # verify that all containers are running
            print "Verify all services are running using docker command ..."
            for service in docker_service_list:
                cmd = command_defs['docker_compose_ps'] + ' {} | wc -l'.format(
                    service)
                out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
                self.assertEqual(rc, 0)
                self.assertGreaterEqual(out, 3)  # 2 are for headers

            # Verify that 'docker ps' return the same number of running process
            cmd = command_defs['docker_ps_count']
            out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)
            self.assertGreaterEqual(out.split(),
                                    [str(len(docker_service_list))])

            # Retrieve the list of services from consul and validate against
            # the list obtained from docker composed
            print "Verify all services are registered in consul ..."
            expected_services = [
                'consul-rest', 'fluentd-intake', 'voltha-grpc',
                'voltha-health', 'consul-8600', 'zookeeper', 'consul', 'kafka'
            ]

            cmd = command_defs['consul_get_services']
            out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)
            try:
                consul_services = json.loads(out)
                intersected_services = [
                    s for s in expected_services if s in consul_services
                ]
                self.assertEqual(len(intersected_services),
                                 len(expected_services))
                # services_match = 0
                # for d_service in docker_service_list:
                #     for c_service in consul_services:
                #         if c_service.find(d_service) != -1:
                #             services_match += 1
                #             print d_service, c_service
                #             break
                # self.assertEqual(services_match, len(docker_service_list))
            except Exception as e:
                self.assertRaises(e)

            # Verify the service record of the voltha service
            print "Verify the service record of voltha in consul ..."
            expected_srv_elements = [
                'ModifyIndex', 'CreateIndex', 'ServiceEnableTagOverride',
                'Node', 'Address', 'TaggedAddresses', 'ServiceID',
                'ServiceName', 'ServiceTags', 'ServiceAddress', 'ServicePort'
            ]
            cmd = command_defs['consul_get_srv_voltha_health']
            out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)
            try:
                srv = json.loads(out)
                intersect_elems = [
                    e for e in srv[0] if e in expected_srv_elements
                ]
                self.assertEqual(len(expected_srv_elements),
                                 len(intersect_elems))
            except Exception as e:
                self.assertRaises(e)

            # Verify kafka client is receiving the messages
            print "Verify kafka client has heartbeat topic ..."
            expected_pattern = ['voltha.heartbeat']
            kafka_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'kafka')
            cmd = command_defs['kafka_client_run'].format(kafka_endpoint)
            kafka_client_output = run_long_running_command_with_timeout(
                cmd, 20)

            # Verify the kafka client output
            # instance id
            found = False
            for out in kafka_client_output:
                if all(ep for ep in expected_pattern if ep in out):
                    found = True
                    break
            self.assertTrue(found)

            # Commented the heartbeat messages from voltha as on Jenkins this
            # test fails more often than not.   On local or cluster environment
            # the kafka event bus works well.

            # verify docker-compose logs are being produced - just get the
            # first work of each line
            print "Verify docker compose logs has output from all the services " \
                  "..."
            expected_output = [
                'voltha_1', 'fluentd_1', 'consul_1', 'registrator_1',
                'kafka_1', 'zookeeper_1', 'ofagent_1', 'netconf_1'
            ]
            cmd = command_defs['docker_compose_logs']
            docker_compose_logs = run_long_running_command_with_timeout(
                cmd, 5, 0)
            intersected_logs = [
                l for l in expected_output if l in docker_compose_logs
            ]
            self.assertEqual(len(intersected_logs), len(expected_output))

            # verify docker voltha logs are being produced - we will just verify
            # some
            # key messages in the logs
            print "Verify docker voltha logs are produced ..."
            self.wait_till('Basic voltha logs are absent',
                           self._is_basic_voltha_logs_produced,
                           interval=1,
                           timeout=30)

        finally:
            print "Stopping all containers ..."
            # clean up all created containers for this test
            #self._stop_and_remove_all_containers()
            cmd = command_defs['docker_compose_down']
            _, err, rc = run_command_to_completion_with_raw_stdout(cmd)

            print "Test_07_start_all_containers_End:------------------ took {}" \
                  " secs \n\n".format(time.time() - t0)
Esempio n. 18
0
# NOTE(review): presumably the voltha gRPC host:port on the docker bridge
# network -- confirm against the callers that use it
host_and_port = '172.17.0.1:50060'

#for ordering the test cases
id = 3  # NOTE(review): shadows the builtin 'id'; renaming would change the module interface
LOCAL_CONSUL = "localhost:8500"  # local consul agent used for service discovery

# Orchestration environment: defaults to docker-compose unless overridden
# by 'test_parameters.orch_env' in the loaded config.
orch_env = 'docker-compose'
if 'test_parameters' in config and 'orch_env' in config['test_parameters']:
    orch_env = config['test_parameters']['orch_env']
print 'orchestration-environment: %s' % orch_env

# Retrieve details of the REST entry point
# Under single-node kubernetes the endpoint is the voltha pod IP on port
# 8443; otherwise it is the envoy service registered in the local consul.
if orch_env == 'k8s-single-node':
    rest_endpoint = get_pod_ip('voltha') + ':8443'
else:
    rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'voltha-envoy-8443')
# Construct the base_url
BASE_URL = 'https://' + rest_endpoint


class GlobalPreChecks(RestBase):
    """Sanity checks run against the global Voltha REST entry point
    before the functional tests execute."""

    base_url = BASE_URL

    # def test_000_get_root(self):
    #     res = self.get('/#!/', expected_content_type='text/html')
    #     self.assertGreaterEqual(res.find('swagger'), 0)

    def test_001_get_health(self):
        """Voltha must report itself HEALTHY before anything else runs."""
        health = self.get('/health')
        self.assertEqual(health['state'], 'HEALTHY')
Esempio n. 19
0
    def test_07_start_all_containers(self):
        print "Test_07_start_all_containers_Start:------------------ "
        t0 = time.time()

        try:
            # Pre-test - clean up all running docker containers
            print "Pre-test: Removing all running containers ..."
            self._stop_and_remove_all_containers()

            # get a list of services in the docker-compose file
            print "Getting list of services in docker compose file ..."
            cmd = command_defs['docker_compose_services']
            services, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)
            docker_service_list = services.split()
            self.assertGreaterEqual(len(docker_service_list),
                                    DOCKER_COMPOSE_FILE_SERVICES_COUNT)

            # start all the containers
            print "Starting all containers ..."
            cmd = command_defs['docker_compose_start_all']
            out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)

            print "Waiting for all containers to be ready ..."
            time.sleep(10)
            rc = verify_all_services_healthy(LOCAL_CONSUL)
            if not rc:
                print "Not all services are up"
            self.assertEqual(rc, True)

            # verify that all containers are running
            print "Verify all services are running using docker command ..."
            for service in docker_service_list:
                cmd = command_defs['docker_compose_ps'] + ' {} | wc -l'.format(
                    service)
                out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
                self.assertEqual(rc, 0)
                self.assertGreaterEqual(out, 3)  # 2 are for headers

            # Verify that 'docker ps' return the same number of running process
            cmd = command_defs['docker_ps_count']
            out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)
            self.assertGreaterEqual(out.split(),
                                    [str(len(docker_service_list))])

            # Retrieve the list of services from consul and validate against
            # the list obtained from docker composed
            print "Verify all services are registered in consul ..."
            expected_services = [
                'consul-rest', 'fluentd-intake', 'chameleon-rest',
                'voltha-grpc', 'voltha-health', 'consul-8600', 'zookeeper',
                'consul', 'kafka'
            ]

            cmd = command_defs['consul_get_services']
            out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)
            try:
                consul_services = json.loads(out)
                intersected_services = [
                    s for s in expected_services if s in consul_services
                ]
                self.assertEqual(len(intersected_services),
                                 len(expected_services))
                # services_match = 0
                # for d_service in docker_service_list:
                #     for c_service in consul_services:
                #         if c_service.find(d_service) != -1:
                #             services_match += 1
                #             print d_service, c_service
                #             break
                # self.assertEqual(services_match, len(docker_service_list))
            except Exception as e:
                self.assertRaises(e)

            # Verify the service record of the voltha service
            print "Verify the service record of voltha in consul ..."
            expected_srv_elements = [
                'ModifyIndex', 'CreateIndex', 'ServiceEnableTagOverride',
                'Node', 'Address', 'TaggedAddresses', 'ServiceID',
                'ServiceName', 'ServiceTags', 'ServiceAddress', 'ServicePort'
            ]
            cmd = command_defs['consul_get_srv_voltha_health']
            out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
            self.assertEqual(rc, 0)
            try:
                srv = json.loads(out)
                intersect_elems = [
                    e for e in srv[0] if e in expected_srv_elements
                ]
                self.assertEqual(len(expected_srv_elements),
                                 len(intersect_elems))
            except Exception as e:
                self.assertRaises(e)

            # Verify kafka client is receiving the messages
            print "Verify kafka client has heartbeat topic ..."
            expected_pattern = ['voltha.heartbeat']
            kafka_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'kafka')
            cmd = command_defs['kafka_client_run'].format(kafka_endpoint)
            kafka_client_output = run_long_running_command_with_timeout(
                cmd, 20)

            # TODO check that there are heartbeats
            # Verify the kafka client output
            # instance id
            found = False
            for out in kafka_client_output:
                if all(ep for ep in expected_pattern if ep in out):
                    found = True
                    break
            self.assertTrue(found)

            print "Verify kafka client is receiving the heartbeat messages from voltha..."
            expected_pattern = ['heartbeat', 'compose_voltha_1']
            cmd = command_defs['kafka_client_heart_check'].format(
                kafka_endpoint)
            kafka_client_output = run_long_running_command_with_timeout(
                cmd, 20)

            print kafka_client_output
            # TODO check that there are heartbeats
            # Verify the kafka client output
            # instance id
            found = False
            for out in kafka_client_output:
                if all(ep for ep in expected_pattern if ep in out):
                    found = True
                    break
            self.assertTrue(found)

            # verify docker-compose logs are being produced - just get the
            # first work of each line
            print "Verify docker compose logs has output from all the services " \
                  "..."
            expected_output = [
                'voltha_1', 'fluentd_1', 'consul_1', 'registrator_1',
                'kafka_1', 'zookeeper_1', 'chameleon_1', 'ofagent_1',
                'netconf_1'
            ]
            cmd = command_defs['docker_compose_logs']
            docker_compose_logs = run_long_running_command_with_timeout(
                cmd, 5, 0)
            intersected_logs = [
                l for l in expected_output if l in docker_compose_logs
            ]
            self.assertEqual(len(intersected_logs), len(expected_output))

            # TODO: file in /tmp/fluentd/ cannot be found
            # # verify fluentd logs are being produced - we will just verify
            # that there are "voltha.logging" in the logs
            # os.environ["PYTHONPATH"] += os.pathsep + "/tmp/fluentd/"
            # os.environ['PATH'] += os.pathsep + "/tmp/fluentd/"
            # expected_output=['voltha.logging']
            # cmd = command_defs['fluentd_logs']
            # fluentd_logs, err = run_command_to_completion_with_raw_stdout(cmd)
            # # self.assertIsNone(err)
            # print err
            # intersected_logs = [l for l in expected_output if
            #                         l in fluentd_logs]
            # self.assertEqual(len(intersected_logs), len(expected_output))

            # verify docker voltha logs are being produced - we will just verify
            # some
            # key messages in the logs
            print "Verify docker voltha logs are produced ..."
            expected_output = [
                'kafka_proxy.send_message', 'coordinator._renew_session',
                'main.heartbeat'
            ]
            cmd = command_defs['docker_voltha_logs']
            docker_voltha_logs = run_long_running_command_with_timeout(
                cmd, 0.5, 3)
            intersected_logs = [
                l for l in expected_output if l in docker_voltha_logs
            ]
            self.assertEqual(len(intersected_logs), len(expected_output))

        finally:
            print "Stopping all containers ..."
            # clean up all created containers for this test
            self._stop_and_remove_all_containers()

            print "Test_07_start_all_containers_End:------------------ took {}" \
                  " secs \n\n".format(time.time() - t0)
class VolthaImageDownloadUpdate(RestBase):
    """Exercise the device image download / activate / revert REST APIs
    against a freshly created simulated OLT device."""

    # Retrieve details on the REST entry point
    rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'chameleon-rest')

    # Construct the base_url
    base_url = 'https://' + rest_endpoint

    def wait_till(self, msg, predicate, interval=0.1, timeout=5.0):
        """Poll *predicate* every *interval* seconds; fail the test with
        *msg* if it is still false after *timeout* seconds."""
        deadline = time() + timeout
        while time() < deadline:
            if predicate():
                return
            sleep(interval)
        self.fail('Timed out while waiting for condition: {}'.format(msg))

    def setUp(self):
        """Create and activate a new simulated device for each test."""
        # Make sure the Voltha REST interface is available
        self.verify_rest()
        # Create a new device
        device = self.add_device()
        # Activate the new device
        self.activate_device(device['id'])
        self.device_id = device['id']
        print("self.device_id {}".format(self.device_id))
        assert(self.device_id)

        # wait until device moves to ACTIVE state.  The membership test
        # needs a one-element tuple: the previous "in ('ACTIVE')" was a
        # substring test against the string 'ACTIVE'.
        self.wait_till(
            'admin state moves from ACTIVATING to ACTIVE',
            lambda: self.get('/api/v1/devices/{}'.format(self.device_id))\
                    ['oper_status'] in ('ACTIVE',),
            timeout=5.0)
        # wait until ONUs are detected
        sleep(2.0)

    def tearDown(self):
        # Disable device
        #self.disable_device(self.device_id)
        # Delete device
        #self.delete_device(self.device_id)
        pass

    # test cases

    def test_voltha_global_download_image(self):
        """Download, cancel, activate and revert images, verifying the
        reported download/image states after each step."""
        name = 'image-1'
        self.request_download_image(name)
        self.verify_request_download_image(name)
        self.cancel_download_image(name)
        self.verify_list_download_images(0)

        name = 'image-2'
        self.request_download_image(name)
        self.verify_request_download_image(name)
        self.get_download_image_status(name)
        self.verify_successful_download_image(name)
        self.activate_image(name)
        self.verify_activate_image(name)
        self.revert_image(name)
        self.verify_revert_image(name)

        name = 'image-3'
        self.request_download_image(name)
        self.verify_request_download_image(name)
        self.verify_list_download_images(2)

    def verify_list_download_images(self, num_of_images):
        """Assert the device reports exactly *num_of_images* downloads."""
        path = '/api/v1/devices/{}/image_downloads' \
                .format(self.device_id)
        res = self.get(path)
        print(res['items'])
        self.assertEqual(len(res['items']), num_of_images)

    def get_download_image(self, name):
        """Return the download record for image *name*."""
        path = '/api/v1/devices/{}/image_downloads/{}' \
                .format(self.device_id, name)
        response = self.get(path)
        print(response)
        return response

    def request_download_image(self, name):
        """Request a download of image *name* onto the device."""
        path = '/api/v1/devices/{}/image_downloads/{}' \
                .format(self.device_id, name)
        url = 'http://[user@](hostname)[:port]/(dir)/(filename)'
        request = ImageDownload(id=self.device_id,
                                name=name,
                                image_version="1.1.2",
                                url=url)
        self.post(path, MessageToDict(request),
                   expected_code=200)

    def verify_request_download_image(self, name):
        """After a download request the download is REQUESTED and the
        device admin state is DOWNLOADING_IMAGE."""
        res = self.get_download_image(name)
        self.assertEqual(res['state'], 'DOWNLOAD_REQUESTED')
        self.assertEqual(res['image_state'], 'IMAGE_UNKNOWN')
        path = '/api/v1/local/devices/{}'.format(self.device_id)
        device = self.get(path)
        self.assertEqual(device['admin_state'], 'DOWNLOADING_IMAGE')

    def cancel_download_image(self, name):
        """Cancel (delete) the download of image *name*."""
        path = '/api/v1/devices/{}/image_downloads/{}' \
                .format(self.device_id, name)
        self.delete(path, expected_code=200)

    def get_download_image_status(self, name):
        """Block until the download of *name* reports DOWNLOAD_SUCCEEDED."""
        path = '/api/v1/devices/{}/image_downloads/{}/status' \
                .format(self.device_id, name)
        response = self.get(path)
        # NOTE(review): busy-polls the REST API with no sleep or timeout;
        # a download that never succeeds would loop forever.  Consider
        # bounding this with wait_till.
        while (response['state'] != 'DOWNLOAD_SUCCEEDED'):
            response = self.get(path)

    def verify_successful_download_image(self, name):
        """After a successful download the device is back to ENABLED."""
        res = self.get_download_image(name)
        self.assertEqual(res['state'], 'DOWNLOAD_SUCCEEDED')
        self.assertEqual(res['image_state'], 'IMAGE_UNKNOWN')
        path = '/api/v1/local/devices/{}'.format(self.device_id)
        device = self.get(path)
        self.assertEqual(device['admin_state'], 'ENABLED')

    def activate_image(self, name):
        """Trigger activation of the downloaded image *name*."""
        path = '/api/v1/devices/{}/image_downloads/{}/image_update' \
                .format(self.device_id, name)
        request = ImageDownload(id=self.device_id,
                                name=name,
                                save_config=True,
                                local_dir='/local/images/v.1.1.run')
        self.post(path, MessageToDict(request),
                  expected_code=200)

    def verify_activate_image(self, name):
        """The activated image must report IMAGE_ACTIVE."""
        res = self.get_download_image(name)
        self.assertEqual(res['image_state'], 'IMAGE_ACTIVE')

    def revert_image(self, name):
        """Trigger a revert of the previously activated image *name*."""
        path = '/api/v1/devices/{}/image_downloads/{}/image_revert' \
                .format(self.device_id, name)
        request = ImageDownload(id=self.device_id,
                                name=name,
                                save_config=True,
                                local_dir='/local/images/v.1.1.run')
        self.post(path, MessageToDict(request),
                  expected_code=200)

    def verify_revert_image(self, name):
        """The reverted image must report IMAGE_INACTIVE."""
        res = self.get_download_image(name)
        self.assertEqual(res['image_state'], 'IMAGE_INACTIVE')


    # test helpers

    def verify_rest(self):
        """Make sure the Voltha REST interface is available."""
        self.get('/api/v1')

    # Create a new simulated device
    def add_device(self):
        device = Device(
            type='simulated_olt',
        )
        device = self.post('/api/v1/local/devices', MessageToDict(device),
                           expected_code=200)
        return device

    # Active the simulated device.
    def activate_device(self, device_id):
        path = '/api/v1/local/devices/{}'.format(device_id)
        self.post(path + '/enable', expected_code=200)
        device = self.get(path)
        self.assertEqual(device['admin_state'], 'ENABLED')

    # Disable the simulated device.
    def disable_device(self, device_id):
        path = '/api/v1/local/devices/{}'.format(device_id)
        self.post(path + '/disable', expected_code=200)
        device = self.get(path)
        self.assertEqual(device['admin_state'], 'DISABLED')

    # Delete the simulated device
    def delete_device(self, device_id):
        path = '/api/v1/local/devices/{}'.format(device_id)
        self.delete(path + '/delete', expected_code=200)
Esempio n. 21
0
 def set_rest_endpoint(self):
     """Resolve the envoy REST endpoint via consul and cache both the
     endpoint and the derived base URL on this instance."""
     endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'envoy-8443')
     self.rest_endpoint = endpoint
     self.base_url = 'https://' + endpoint
Esempio n. 22
0
class VolthaAlarmFilterTests(RestBase):
    # Retrieve details on the REST entry point
    rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'chameleon-rest')

    # Construct the base_url
    base_url = 'https://' + rest_endpoint

    # Start by querying consul to get the endpoint details
    kafka_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'kafka')

    # ~~~~~~~~~~~~ Tests ~~~~~~~~~~~~

    def test_1_alarm_topic_exists(self):
        # Produce a message to ensure that the topic exists
        cmd = COMMANDS['kafka_client_send_msg'].format(self.kafka_endpoint)
        run_long_running_command_with_timeout(cmd, 5)

        # We want to make sure that the topic is available on the system
        expected_pattern = ['voltha.alarms']

        # Start the kafka client to retrieve details on topics
        cmd = COMMANDS['kafka_client_run'].format(self.kafka_endpoint)
        kafka_client_output = run_long_running_command_with_timeout(cmd, 20)

        # Loop through the kafka client output to find the topic
        found = False
        for out in kafka_client_output:
            if all(ep in out for ep in expected_pattern):
                found = True
                break

        self.assertTrue(found,
                        'Failed to find topic {}'.format(expected_pattern))

    def test_2_alarm_generated_by_adapter(self):
        # Verify that REST calls can be made
        self.verify_rest()

        # Create a new device
        device_not_filtered = self.add_device()
        device_filtered = self.add_device()

        self.add_device_id_filter(device_filtered['id'])

        # Activate the new device
        self.activate_device(device_not_filtered['id'])
        self.activate_device(device_filtered['id'])

        # The simulated olt devices should start generating alarms periodically

        # We should see alarms generated for the non filtered device
        self.get_alarm_event(device_not_filtered['id'])

        # We should not see any alarms from the filtered device
        self.get_alarm_event(device_filtered['id'], True)

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    # Make sure the Voltha REST interface is available
    def verify_rest(self):
        self.get('/api/v1')

    # Create a new simulated device
    def add_device(self):
        device = Device(type='simulated_olt', )
        device = self.post('/api/v1/devices',
                           MessageToDict(device),
                           expected_code=200)
        return device

    # Create a filter against a specific device id
    def add_device_id_filter(self, device_id):
        rules = list()
        rule = dict()

        # Create a filter with a single rule
        rule['key'] = 'device_id'
        rule['value'] = device_id
        rules.append(rule)

        alarm_filter = AlarmFilter(rules=rules)
        alarm_filter = self.post('/api/v1/alarm_filters',
                                 MessageToDict(alarm_filter),
                                 expected_code=200)

        return alarm_filter

    # Active the simulated device.
    # This will trigger the simulation of random alarms
    def activate_device(self, device_id):
        path = '/api/v1/devices/{}'.format(device_id)
        self.post(path + '/enable', expected_code=200)
        device = self.get(path)
        self.assertEqual(device['admin_state'], 'ENABLED')

    # Retrieve a sample alarm for a specific device
    def get_alarm_event(self, device_id, expect_failure=False):
        cmd = COMMANDS['kafka_client_alarm_check'].format(self.kafka_endpoint)
        kafka_client_output = run_long_running_command_with_timeout(cmd, 30)

        # Verify the kafka client output
        found = False
        alarm_data = None

        for out in kafka_client_output:
            # Catch any error that might occur while reading the kafka messages
            try:
                alarm_data = simplejson.loads(out)
                print alarm_data

                if not alarm_data or 'resource_id' not in alarm_data:
                    continue
                elif alarm_data['resource_id'] == device_id:
                    found = True
                    break

            except Exception as e:
                continue

        if not expect_failure:
            self.assertTrue(
                found, 'Failed to find kafka alarm with device id:{}'.format(
                    device_id))
        else:
            self.assertFalse(
                found,
                'Found a kafka alarm with device id:{}.  It should have been filtered'
                .format(device_id))

        return alarm_data
Esempio n. 23
0
class GlobalRestCalls(RestBase):
    """End-to-end ordered walk of the global (cluster-level) voltha REST
    APIs: instances, devices, logical devices, ports, flows and groups.
    Later steps depend on state created by earlier ones."""

    def wait_till(self, msg, predicate, interval=0.1, timeout=5.0):
        """Poll *predicate* every *interval* seconds; fail the test with
        *msg* if it is still false after *timeout* seconds."""
        # NOTE(review): uses bare time()/sleep() -- assumes the module does
        # `from time import time, sleep`; confirm against the file imports.
        deadline = time() + timeout
        while time() < deadline:
            if predicate():
                return
            sleep(interval)
        self.fail('Timed out while waiting for condition: {}'.format(msg))

    # Retrieve details of the REST entry point
    rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'envoy-8443')

    # Construct the base_url
    base_url = 'https://' + rest_endpoint

    def test_01_global_rest_apis(self):
        """Single ordered scenario exercising every global REST call."""
        # ~~~~~~~~~~~~~~~~~~~ GLOBAL TOP-LEVEL SERVICES~ ~~~~~~~~~~~~~~~~~~~~~~
        # self._get_root()
        self._get_schema()
        self._get_health()
        # ~~~~~~~~~~~~~~~~~~~ TOP LEVEL VOLTHA OPERATIONS ~~~~~~~~~~~~~~~~~~~~~
        self._get_voltha()
        self._list_voltha_instances()
        self._get_voltha_instance()
        olt_id = self._add_olt_device()
        self._verify_device_preprovisioned_state(olt_id)
        self._activate_device(olt_id)
        ldev_id = self._wait_for_logical_device(olt_id)
        ldevices = self._list_logical_devices()
        logical_device_id = ldevices['items'][0]['id']
        self._get_logical_device(logical_device_id)
        self._list_logical_device_ports(logical_device_id)
        self._list_and_update_logical_device_flows(logical_device_id)
        self._list_and_update_logical_device_flow_groups(logical_device_id)
        devices = self._list_devices()
        device_id = devices['items'][0]['id']
        self._get_device(device_id)
        self._list_device_ports(device_id)
        self._list_device_flows(device_id)
        self._list_device_flow_groups(device_id)
        dtypes = self._list_device_types()
        self._get_device_type(dtypes['items'][0]['id'])

    def _get_root(self):
        """The root page must serve the swagger UI."""
        res = self.get('/', expected_content_type='text/html')
        self.assertGreaterEqual(res.find('swagger'), 0)

    def _get_schema(self):
        """The schema endpoint must expose protos/yang/swagger sources."""
        res = self.get('/schema')
        self.assertEqual(set(res.keys()),
                         {'protos', 'yang_from', 'swagger_from'})

    def _get_health(self):
        """Voltha must report itself HEALTHY."""
        res = self.get('/health')
        self.assertEqual(res['state'], 'HEALTHY')

    # ~~~~~~~~~~~~~~~~~~~~~ TOP LEVEL VOLTHA OPERATIONS ~~~~~~~~~~~~~~~~~~~~~~~

    def _get_voltha(self):
        """Top-level voltha resource reports the expected version."""
        res = self.get('/api/v1')
        self.assertEqual(res['version'], '0.9.0')

    def _list_voltha_instances(self):
        """Exactly one voltha instance is expected in this deployment."""
        res = self.get('/api/v1/instances')
        self.assertEqual(len(res['items']), 1)

    def _get_voltha_instance(self):
        """A single instance can be fetched by id and reports its version."""
        res = self.get('/api/v1/instances')
        voltha_id = res['items'][0]
        res = self.get('/api/v1/instances/{}'.format(voltha_id))
        self.assertEqual(res['version'], '0.9.0')

    def _add_olt_device(self):
        """Pre-provision a simulated OLT and return its device id."""
        # NOTE(review): this class passes expected_http_code= while other
        # classes in this file pass expected_code= -- confirm which keyword
        # the RestBase helper actually accepts.
        device = Device(type='simulated_olt', mac_address='00:00:00:00:00:01')
        device = self.post('/api/v1/devices',
                           MessageToDict(device),
                           expected_http_code=200)
        return device['id']

    def _verify_device_preprovisioned_state(self, olt_id):
        """A freshly added device reads back in PREPROVISIONED state."""
        # we also check that so far what we read back is same as what we get
        # back on create
        device = self.get('/api/v1/devices/{}'.format(olt_id))
        self.assertNotEqual(device['id'], '')
        self.assertEqual(device['adapter'], 'simulated_olt')
        self.assertEqual(device['admin_state'], 'PREPROVISIONED')
        self.assertEqual(device['oper_status'], 'UNKNOWN')

    def _activate_device(self, olt_id):
        """Enable the device and wait until it is fully ACTIVE with its
        image details, connect state and ports populated."""
        path = '/api/v1/devices/{}'.format(olt_id)
        self.post(path + '/enable', expected_http_code=200)
        device = self.get(path)
        self.assertEqual(device['admin_state'], 'ENABLED')

        self.wait_till('admin state moves to ACTIVATING or ACTIVE',
                       lambda: self.get(path)['oper_status'] in
                       ('ACTIVATING', 'ACTIVE'),
                       timeout=0.5)

        # eventually, it shall move to active state and by then we shall have
        # device details filled, connect_state set, and device ports created
        self.wait_till('admin state ACTIVE',
                       lambda: self.get(path)['oper_status'] == 'ACTIVE',
                       timeout=0.5)
        device = self.get(path)
        images = device['images']
        image = images['image']
        image_1 = image[0]
        version = image_1['version']
        self.assertNotEqual(version, '')
        self.assertEqual(device['connect_status'], 'REACHABLE')

        ports = self.get(path + '/ports')['items']
        self.assertEqual(len(ports), 2)

    def _wait_for_logical_device(self, olt_id):
        """Locate the logical device spawned for the OLT, verify its
        linkage and nni port, and return its id."""
        # we shall find the logical device id from the parent_id of the olt
        # (root) device
        device = self.get('/api/v1/devices/{}'.format(olt_id))
        self.assertNotEqual(device['parent_id'], '')
        logical_device = self.get('/api/v1/logical_devices/{}'.format(
            device['parent_id']))

        # the logical device shall be linked back to the hard device,
        # its ports too
        self.assertEqual(logical_device['root_device_id'], device['id'])

        logical_ports = self.get('/api/v1/logical_devices/{}/ports'.format(
            logical_device['id']))['items']
        self.assertGreaterEqual(len(logical_ports), 1)
        logical_port = logical_ports[0]
        self.assertEqual(logical_port['id'], 'nni')
        self.assertEqual(logical_port['ofp_port']['name'], 'nni')
        self.assertEqual(logical_port['ofp_port']['port_no'], 129)
        self.assertEqual(logical_port['device_id'], device['id'])
        self.assertEqual(logical_port['device_port_no'], 2)
        return logical_device['id']

    def _list_logical_devices(self):
        """At least one logical device must exist; return the listing."""
        res = self.get('/api/v1/logical_devices')
        self.assertGreaterEqual(len(res['items']), 1)
        return res

    def _get_logical_device(self, id):
        """Fetch one logical device and check it has a datapath id."""
        # NOTE(review): the parameter name shadows the builtin `id`
        # (here and in the sibling helpers); renaming would be a safe
        # cleanup since all calls are positional.
        res = self.get('/api/v1/logical_devices/{}'.format(id))
        self.assertIsNotNone(res['datapath_id'])

    def _list_logical_device_ports(self, id):
        """A logical device must expose at least one port."""
        res = self.get('/api/v1/logical_devices/{}/ports'.format(id))
        self.assertGreaterEqual(len(res['items']), 1)

    def _list_and_update_logical_device_flows(self, id):
        """Add a flow to the logical device and verify the count grows."""

        # retrieve flow list
        res = self.get('/api/v1/logical_devices/{}/flows'.format(id))
        len_before = len(res['items'])

        # add some flows
        req = ofp.FlowTableUpdate(id=id,
                                  flow_mod=mk_simple_flow_mod(
                                      cookie=randint(1, 10000000000),
                                      priority=len_before,
                                      match_fields=[in_port(129)],
                                      actions=[output(1)]))
        res = self.post('/api/v1/logical_devices/{}/flows'.format(id),
                        MessageToDict(req, preserving_proto_field_name=True),
                        expected_http_code=200)
        # TODO check some stuff on res

        res = self.get('/api/v1/logical_devices/{}/flows'.format(id))
        len_after = len(res['items'])
        self.assertGreater(len_after, len_before)

    def _list_and_update_logical_device_flow_groups(self, id):
        """Add a flow group to the logical device and verify the count
        grows."""

        # retrieve flow list
        res = self.get('/api/v1/logical_devices/{}/flow_groups'.format(id))
        len_before = len(res['items'])

        # add some flows
        req = ofp.FlowGroupTableUpdate(
            id=id,
            group_mod=ofp.ofp_group_mod(
                command=ofp.OFPGC_ADD,
                type=ofp.OFPGT_ALL,
                group_id=len_before + 1,
                buckets=[
                    ofp.ofp_bucket(actions=[
                        ofp.ofp_action(type=ofp.OFPAT_OUTPUT,
                                       output=ofp.ofp_action_output(port=1))
                    ])
                ]))
        res = self.post('/api/v1/logical_devices/{}/flow_groups'.format(id),
                        MessageToDict(req, preserving_proto_field_name=True),
                        expected_http_code=200)
        # TODO check some stuff on res

        res = self.get('/api/v1/logical_devices/{}/flow_groups'.format(id))
        len_after = len(res['items'])
        self.assertGreater(len_after, len_before)

    def _list_devices(self):
        """At least the OLT and one ONU must be listed; return the listing."""
        res = self.get('/api/v1/devices')
        self.assertGreaterEqual(len(res['items']), 2)
        return res

    def _get_device(self, id):
        """Fetch a single device by id."""
        res = self.get('/api/v1/devices/{}'.format(id))
        # TODO test result

    def _list_device_ports(self, id):
        """A device must expose at least two ports."""
        res = self.get('/api/v1/devices/{}/ports'.format(id))
        self.assertGreaterEqual(len(res['items']), 2)

    def _list_device_flows(self, id):
        """A device must have at least one flow after the updates above."""
        # pump some flows into the logical device
        res = self.get('/api/v1/devices/{}/flows'.format(id))
        self.assertGreaterEqual(len(res['items']), 1)

    def _list_device_flow_groups(self, id):
        """Device flow groups listing must be retrievable (may be empty)."""
        res = self.get('/api/v1/devices/{}/flow_groups'.format(id))
        self.assertGreaterEqual(len(res['items']), 0)

    def _list_device_types(self):
        """At least two device types (OLT/ONU) must be registered."""
        res = self.get('/api/v1/device_types')
        self.assertGreaterEqual(len(res['items']), 2)
        return res

    def _get_device_type(self, dtype):
        """Fetch a single device type by id."""
        res = self.get('/api/v1/device_types/{}'.format(dtype))
        self.assertIsNotNone(res)
        # TODO test the result

    def _list_device_groups(self):
        pass
        # res = self.get('/api/v1/device_groups')
        # self.assertGreaterEqual(len(res['items']), 1)

    def _get_device_group(self):
        pass
Esempio n. 24
0
class TestLocalRestCalls(RestBase):

    # Retrieve details of the REST entry point
    rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'envoy-8443')

    # Construct the base_url
    base_url = 'https://' + rest_endpoint

    def test_02_local_rest_apis(self):
        # ~~~~~~~~~~~~~~~~ VOLTHA INSTANCE LEVEL OPERATIONS ~~~~~~~~~~~~~~~~~~~
        self._get_local()
        self._get_local_health()
        self._list_local_adapters()
        ldevices = self._list_local_logical_devices()
        logical_device_id = ldevices[0]['id']
        self._get_local_logical_device(logical_device_id)
        self._list_local_logical_device_ports(logical_device_id)
        self._list_and_update_local_logical_device_flows(logical_device_id)
        self._list_and_update_local_logical_device_flow_groups(
            logical_device_id)
        devices = self._list_local_devices()
        device_id = devices['items'][0]['id']
        self._get_local_device(device_id)
        self._list_local_device_ports(device_id)
        self._list_local_device_flows(device_id)
        self._list_local_device_flow_groups(device_id)
        dtypes = self._list_local_device_types()
        self._get_local_device_type(dtypes['items'][0]['id'])

    def _get_local(self):
        self.assertEqual(self.get('/api/v1/local')['version'], '0.9.0')

    def _get_local_health(self):
        d = self.get('/api/v1/local/health')
        self.assertEqual(d['state'], 'HEALTHY')

    def _list_local_adapters(self):
        self.assertGreaterEqual(
            len(self.get('/api/v1/local/adapters')['items']), 1)

    def _list_local_logical_devices(self):
        res = self.get('/api/v1/local/logical_devices')['items']
        self.assertGreaterEqual(res, 1)
        return res

    def _get_local_logical_device(self, id):
        res = self.get('/api/v1/local/logical_devices/{}'.format(id))
        self.assertIsNotNone(res['datapath_id'])

    def _list_local_logical_device_ports(self, id):
        res = self.get('/api/v1/local/logical_devices/{}/ports'.format(id))
        self.assertGreaterEqual(len(res['items']), 1)

    def _list_and_update_local_logical_device_flows(self, id):

        # retrieve flow list
        res = self.get('/api/v1/local/logical_devices/{}/flows'.format(id))
        len_before = len(res['items'])

        t0 = time()
        # add some flows
        for _ in xrange(10):
            req = ofp.FlowTableUpdate(
                id=id,
                flow_mod=mk_simple_flow_mod(
                    cookie=randint(1, 10000000000),
                    priority=randint(1, 10000),  # to make it unique
                    match_fields=[in_port(129)],
                    actions=[output(1)]))
            self.post('/api/v1/local/logical_devices/{}/flows'.format(id),
                      MessageToDict(req, preserving_proto_field_name=True),
                      expected_http_code=200)
        print time() - t0

        res = self.get('/api/v1/local/logical_devices/{}/flows'.format(id))
        len_after = len(res['items'])
        self.assertGreater(len_after, len_before)

    def _list_and_update_local_logical_device_flow_groups(self, id):

        # retrieve flow list
        res = self.get('/api/v1/local/logical_devices/{'
                       '}/flow_groups'.format(id))
        len_before = len(res['items'])

        # add some flows
        req = ofp.FlowGroupTableUpdate(
            id=id,
            group_mod=ofp.ofp_group_mod(
                command=ofp.OFPGC_ADD,
                type=ofp.OFPGT_ALL,
                group_id=len_before + 1,
                buckets=[
                    ofp.ofp_bucket(actions=[
                        ofp.ofp_action(type=ofp.OFPAT_OUTPUT,
                                       output=ofp.ofp_action_output(port=1))
                    ])
                ]))

        res = self.post('/api/v1/local/logical_devices/{'
                        '}/flow_groups'.format(id),
                        MessageToDict(req, preserving_proto_field_name=True),
                        expected_http_code=200)
        # TODO check some stuff on res

        res = self.get('/api/v1/local/logical_devices/{'
                       '}/flow_groups'.format(id))
        len_after = len(res['items'])
        self.assertGreater(len_after, len_before)

    def _list_local_devices(self):
        res = self.get('/api/v1/local/devices')
        self.assertGreaterEqual(len(res['items']), 2)
        return res

    def _get_local_device(self, id):
        res = self.get('/api/v1/local/devices/{}'.format(id))
        self.assertIsNotNone(res)

    def _list_local_device_ports(self, id):
        res = self.get('/api/v1/local/devices/{}/ports'.format(id))
        self.assertGreaterEqual(len(res['items']), 2)

    def _list_local_device_flows(self, id):
        res = self.get('/api/v1/local/devices/{}/flows'.format(id))
        self.assertGreaterEqual(len(res['items']), 0)

    def _list_local_device_flow_groups(self, id):
        res = self.get('/api/v1/local/devices/{}/flow_groups'.format(id))
        self.assertGreaterEqual(len(res['items']), 0)

    def _list_local_device_types(self):
        res = self.get('/api/v1/local/device_types')
        self.assertGreaterEqual(len(res['items']), 2)
        return res

    def _get_local_device_type(self, type):
        res = self.get('/api/v1/local/device_types/{}'.format(type))
        self.assertIsNotNone(res)

    def _list_local_device_groups(self):
        pass
        # res = self.get('/api/v1/local/device_groups')
        # self.assertGreaterEqual(len(res['items']), 1)

    def _get_local_device_group(self):
        pass
# Esempio n. 25
# 0
    port = options.graphite_port

    # Connect to Graphite
    try:
        graphite = Graphite(host, port)
    except socket.error, e:
        print "Could not connect to graphite host %s:%s" % (host, port)
        sys.exit(1)
    except socket.gaierror, e:
        print "Invalid hostname for graphite host %s" % (host)
        sys.exit(1)
    log.info('Connected to graphite at {}:{}'.format(host, port))

    # Resolve Kafka value if it is based on consul lookup
    if kafka.startswith('@'):
        kafka = get_endpoint_from_consul(consul, kafka[1:])

    # Connect to Kafka
    try:
        log.info('connect-to-kafka', kafka=kafka)
        consumer = KafkaConsumer(topic, bootstrap_servers=kafka)
    except KafkaError, e:
        log.error('failed-to-connect-to-kafka', kafka=kafka, e=e)
        sys.exit(1)

    # Consume Kafka topic
    log.info('start-loop', topic=topic)
    for record in consumer:
        assert isinstance(record, ConsumerRecord)
        msg = record.value
    def get_rest_endpoint(self):
        """Resolve the envoy REST entry point via Consul and cache the
        HTTPS base URL on the instance."""
        endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'envoy-8443')
        self.base_url = 'https://' + endpoint
# Esempio n. 27
# 0
            "aes_indicator": True,
            "name": "GEMPORT 2",
            "traffic_class": 0,
            "itf_ref": "Enet UNI 1",
            "tcont_ref": "TCont 2",
            "gemport_id": 0
            }
        }
    }
]

# for ordering the test cases
id = 3  # NOTE(review): shadows the builtin ``id``; kept for test ordering
LOCAL_CONSUL = "localhost:8500"  # local Consul agent (host:port)
# Retrieve details of the REST entry point
rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'chameleon-rest')
# Construct the base_url
base_url = 'https://' + rest_endpoint


class GlobalPreChecks(RestBase):
    """Sanity checks run before the main test suites: the swagger UI is
    served at the root and the service reports itself healthy."""

    def test_000_get_root(self):
        root_page = self.get('/#!/', expected_content_type='text/html')
        self.assertGreaterEqual(root_page.find('swagger'), 0)

    def test_001_get_health(self):
        health = self.get('/health')
        self.assertEqual(health['state'], 'HEALTHY')


class TestXPon(RestBase):
    """XPON REST tests; resolves its own REST endpoint on demand."""

    def set_rest_endpoint(self):
        # Look up the envoy TLS endpoint via the local Consul agent and
        # cache both the raw endpoint and the HTTPS base URL.
        self.rest_endpoint = get_endpoint_from_consul(
            LOCAL_CONSUL, 'envoy-8443')
        self.base_url = 'https://' + self.rest_endpoint
# Esempio n. 29
# 0
    def get_rest_endpoint(self):
        """Look up the envoy REST entry point in Consul and store the
        resulting HTTPS base URL on the instance."""
        endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'envoy-8443')
        self.base_url = 'https://' + endpoint
# Esempio n. 30
# 0
 def set_rest_endpoint(self):
     """Resolve the chameleon REST endpoint via Consul and cache both
     the endpoint and the HTTPS base URL on the instance."""
     endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'chameleon-rest')
     self.rest_endpoint = endpoint
     self.base_url = 'https://' + endpoint
class TestDeviceStateChangeSequence(RestBase):
    """
    The prerequisite for this test are:
     1. voltha ensemble is running
          docker-compose -f compose/docker-compose-system-test.yml up -d
     2. ponsim olt is running with 1 OLT and 4 ONUs
          sudo -s
          . ./env.sh
          ./ponsim/main.py -v -o 4
    """

    # Retrieve details of the REST entry point
    if orch_env == 'k8s-single-node':
        rest_endpoint = get_pod_ip('voltha') + ':8443'
        olt_host_and_port = get_pod_ip('olt') + ':50060'
    elif orch_env == 'swarm-single-node':
        rest_endpoint = 'localhost:8443'
        olt_host_and_port = 'localhost:50060'
    else:
        # default: resolve via Consul; OLT assumed on the docker bridge
        rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL,
                                                 'voltha-envoy-8443')
        olt_host_and_port = '172.17.0.1:50060'

    # Construct the base_url
    base_url = 'https://' + rest_endpoint

    def wait_till(self, msg, predicate, interval=0.1, timeout=5.0):
        """Poll ``predicate`` every ``interval`` seconds until it returns
        truthy or ``timeout`` seconds elapse, then fail with ``msg``."""
        deadline = time() + timeout
        while time() < deadline:
            if predicate():
                return
            sleep(interval)
        self.fail('Timed out while waiting for condition: {}'.format(msg))

    def test_device_state_changes_scenarios(self):
        """Run the happy-path and failure-path state-change sequences.
        The order of the two scenarios matters: each one starts and ends
        with no devices provisioned."""

        self.verify_prerequisites()
        # Test basic scenario

        self.basic_scenario()
        self.failure_scenario()

    def basic_scenario(self):
        """
        Test the enable -> disable -> enable -> disable -> delete for OLT
        and ONU.
        """
        self.assert_no_device_present()
        olt_id = self.add_olt_device()
        self.verify_device_preprovisioned_state(olt_id)
        self.enable_device(olt_id)
        ldev_id = self.wait_for_logical_device(olt_id)
        onu_ids = self.wait_for_onu_discovery(olt_id)
        self.verify_logical_ports(ldev_id, 5)
        self.simulate_eapol_flow_install(ldev_id, olt_id, onu_ids)
        self.verify_olt_eapol_flow(olt_id)
        olt_ids, onu_ids = self.get_devices()
        # disabling one ONU removes its logical port (5 -> 4)
        self.disable_device(onu_ids[0])
        self.verify_logical_ports(ldev_id, 4)
        self.enable_device(onu_ids[0])
        self.verify_logical_ports(ldev_id, 5)
        self.simulate_eapol_flow_install(ldev_id, olt_id, onu_ids)
        self.verify_olt_eapol_flow(olt_id)
        # disabling the OLT cascades: all ONUs disabled, logical device gone
        self.disable_device(olt_ids[0])
        self.assert_all_onus_state(olt_ids[0], 'DISABLED', 'UNKNOWN')
        self.assert_no_logical_device()
        self.enable_device(olt_ids[0])
        self.assert_all_onus_state(olt_ids[0], 'ENABLED', 'ACTIVE')
        self.wait_for_logical_device(olt_ids[0])
        self.simulate_eapol_flow_install(ldev_id, olt_id, onu_ids)
        self.verify_olt_eapol_flow(olt_id)
        self.disable_device(onu_ids[0])
        # self.delete_device(onu_ids[0])
        self.verify_logical_ports(ldev_id, 4)
        self.disable_device(olt_ids[0])
        self.delete_device(olt_ids[0])
        self.assert_no_device_present()

    def failure_scenario(self):
        """Exercise the error paths: deleting enabled devices and
        operating on an unknown device id must be rejected without
        disturbing the existing devices."""
        self.assert_no_device_present()
        olt_id = self.add_olt_device()
        self.verify_device_preprovisioned_state(olt_id)
        self.enable_device(olt_id)
        ldev_id = self.wait_for_logical_device(olt_id)
        onu_ids = self.wait_for_onu_discovery(olt_id)
        self.verify_logical_ports(ldev_id, 5)
        self.simulate_eapol_flow_install(ldev_id, olt_id, onu_ids)
        self.verify_olt_eapol_flow(olt_id)
        self.delete_device_incorrect_state(olt_id)
        self.delete_device_incorrect_state(onu_ids[0])
        unknown_id = '9999999999'
        self.enable_unknown_device(unknown_id)
        self.disable_unknown_device(unknown_id)
        self.delete_unknown_device(unknown_id)
        latest_olt_ids, latest_onu_ids = self.get_devices()
        self.assertEqual(len(latest_olt_ids), 1)
        self.assertEqual(len(latest_onu_ids), 4)
        self.verify_logical_ports(ldev_id, 5)
        self.simulate_eapol_flow_install(ldev_id, olt_id, onu_ids)
        # Cleanup
        self.disable_device(olt_id)
        self.delete_device(olt_id)
        self.assert_no_device_present()

    def verify_prerequisites(self):
        # all we care is that Voltha is available via REST using the base uri
        self.get('/api/v1')

    def get_devices(self):
        """Partition all known devices into (olt_ids, onu_ids)."""
        devices = self.get('/api/v1/devices')['items']
        olt_ids = []
        onu_ids = []
        for d in devices:
            if d['adapter'] == 'ponsim_olt':
                olt_ids.append(d['id'])
            elif d['adapter'] == 'ponsim_onu':
                onu_ids.append(d['id'])
            else:
                # NOTE(review): the elif and else branches are identical —
                # anything that is not a ponsim OLT is counted as an ONU.
                onu_ids.append(d['id'])
        return olt_ids, onu_ids

    def add_olt_device(self):
        """Pre-provision a ponsim OLT and return its device id."""
        device = Device(type='ponsim_olt',
                        host_and_port=self.olt_host_and_port)
        device = self.post('/api/v1/devices',
                           MessageToDict(device),
                           expected_http_code=200)
        return device['id']

    def verify_device_preprovisioned_state(self, olt_id):
        # we also check that so far what we read back is same as what we get
        # back on create
        device = self.get('/api/v1/devices/{}'.format(olt_id))
        self.assertNotEqual(device['id'], '')
        self.assertEqual(device['adapter'], 'ponsim_olt')
        self.assertEqual(device['admin_state'], 'PREPROVISIONED')
        self.assertEqual(device['oper_status'], 'UNKNOWN')

    def enable_device(self, olt_id):
        """Enable the device and wait for it to reach ACTIVE/REACHABLE
        with its two ports created."""
        path = '/api/v1/devices/{}'.format(olt_id)
        self.post(path + '/enable', expected_http_code=200)
        device = self.get(path)
        self.assertEqual(device['admin_state'], 'ENABLED')

        self.wait_till(
            'admin state moves to ACTIVATING or ACTIVE',
            lambda: self.get(path)['oper_status'] in ('ACTIVATING', 'ACTIVE'))

        # eventually, it shall move to active state and by then we shall have
        # device details filled, connect_state set, and device ports created
        self.wait_till('admin state ACTIVE',
                       lambda: self.get(path)['oper_status'] == 'ACTIVE')
        device = self.get(path)
        self.assertEqual(device['connect_status'], 'REACHABLE')

        ports = self.get(path + '/ports')['items']
        self.assertEqual(len(ports), 2)

    def wait_for_logical_device(self, olt_id):
        # we shall find the logical device id from the parent_id of the olt
        # (root) device
        device = self.get('/api/v1/devices/{}'.format(olt_id))
        self.assertNotEqual(device['parent_id'], '')
        logical_device = self.get('/api/v1/logical_devices/{}'.format(
            device['parent_id']))

        # the logical device shall be linked back to the hard device,
        # its ports too
        self.assertEqual(logical_device['root_device_id'], device['id'])

        logical_ports = self.get('/api/v1/logical_devices/{}/ports'.format(
            logical_device['id']))['items']
        self.assertGreaterEqual(len(logical_ports), 1)
        logical_port = logical_ports[0]
        self.assertEqual(logical_port['id'], 'nni')
        self.assertEqual(logical_port['ofp_port']['name'], 'nni')
        self.assertEqual(logical_port['ofp_port']['port_no'], 0)
        self.assertEqual(logical_port['device_id'], device['id'])
        self.assertEqual(logical_port['device_port_no'], 2)
        return logical_device['id']

    def find_onus(self, olt_id):
        """Return all devices whose parent is the given OLT."""
        devices = self.get('/api/v1/devices')['items']
        return [d for d in devices if d['parent_id'] == olt_id]

    def wait_for_onu_discovery(self, olt_id):
        # shortly after we shall see the discovery of four new onus, linked to
        # the olt device
        self.wait_till('find four ONUs linked to the olt device',
                       lambda: len(self.find_onus(olt_id)) >= 4)
        # verify that they are properly set
        onus = self.find_onus(olt_id)
        for onu in onus:
            self.assertEqual(onu['admin_state'], 'ENABLED')
            self.assertEqual(onu['oper_status'], 'ACTIVE')

        return [onu['id'] for onu in onus]

    def assert_all_onus_state(self, olt_id, admin_state, oper_state):
        # verify all onus are in a given state
        onus = self.find_onus(olt_id)
        for onu in onus:
            self.assertEqual(onu['admin_state'], admin_state)
            self.assertEqual(onu['oper_status'], oper_state)

        return [onu['id'] for onu in onus]

    def assert_onu_state(self, onu_id, admin_state, oper_state):
        # Verify the onu states are correctly set
        onu = self.get('/api/v1/devices/{}'.format(onu_id))
        self.assertEqual(onu['admin_state'], admin_state)
        self.assertEqual(onu['oper_status'], oper_state)

    def verify_logical_ports(self, ldev_id, num_ports):
        """Check the logical device has at least ``num_ports`` ports,
        all in the OFP LIVE state."""

        # at this point we shall see num_ports logical ports on the
        # logical device
        logical_ports = self.get(
            '/api/v1/logical_devices/{}/ports'.format(ldev_id))['items']
        self.assertGreaterEqual(len(logical_ports), num_ports)

        # verify that all logical ports are LIVE (state=4)
        for lport in logical_ports:
            self.assertEqual(lport['ofp_port']['state'], 4)

    def simulate_eapol_flow_install(self, ldev_id, olt_id, onu_ids):
        """Install one EAPOL trap-to-controller flow per ONU, as an SDN
        controller would."""

        # emulate the flow mod requests that shall arrive from the SDN
        # controller, one for each ONU
        lports = self.get(
            '/api/v1/logical_devices/{}/ports'.format(ldev_id))['items']

        # device_id -> logical port map, which we will use to construct
        # our flows
        lport_map = dict((lp['device_id'], lp) for lp in lports)
        for onu_id in onu_ids:
            # if eth_type == 0x888e => send to controller
            _in_port = lport_map[onu_id]['ofp_port']['port_no']
            req = ofp.FlowTableUpdate(
                id=ldev_id,
                flow_mod=mk_simple_flow_mod(
                    match_fields=[
                        in_port(_in_port),
                        vlan_vid(ofp.OFPVID_PRESENT | 0),
                        eth_type(0x888e)
                    ],
                    actions=[output(ofp.OFPP_CONTROLLER)],
                    priority=1000))
            res = self.post('/api/v1/logical_devices/{}/flows'.format(ldev_id),
                            MessageToDict(req,
                                          preserving_proto_field_name=True),
                            expected_http_code=200)

        # for sanity, verify that flows are in flow table of logical device
        flows = self.get(
            '/api/v1/logical_devices/{}/flows'.format(ldev_id))['items']
        self.assertGreaterEqual(len(flows), 4)

    def verify_olt_eapol_flow(self, olt_id):
        """Spot-check the flows pushed down to the physical OLT."""
        flows = self.get('/api/v1/devices/{}/flows'.format(olt_id))['items']
        self.assertEqual(len(flows), 8)
        flow = flows[1]
        self.assertEqual(flow['table_id'], 0)
        self.assertEqual(flow['priority'], 1000)

        # TODO refine this
        # self.assertEqual(flow['match'], {})
        # self.assertEqual(flow['instructions'], [])

    def disable_device(self, id):
        """Disable the device and wait for it to become UNKNOWN and
        UNREACHABLE, with both ports disabled."""
        path = '/api/v1/devices/{}'.format(id)
        self.post(path + '/disable', expected_http_code=200)
        device = self.get(path)
        self.assertEqual(device['admin_state'], 'DISABLED')

        self.wait_till('operational state moves to UNKNOWN',
                       lambda: self.get(path)['oper_status'] == 'UNKNOWN')

        # eventually, the connect_state should be UNREACHABLE
        self.wait_till(
            'connect status UNREACHABLE',
            lambda: self.get(path)['connect_status'] == 'UNREACHABLE')

        # Device's ports should be INACTIVE
        ports = self.get(path + '/ports')['items']
        self.assertEqual(len(ports), 2)
        for p in ports:
            self.assertEqual(p['admin_state'], 'DISABLED')
            self.assertEqual(p['oper_status'], 'UNKNOWN')

    def delete_device(self, id):
        # deletion succeeds (grpc_status=0); a subsequent GET returns
        # NOT_FOUND (grpc_status=5) with an empty body
        path = '/api/v1/devices/{}'.format(id)
        self.delete(path + '/delete', expected_http_code=200, grpc_status=0)
        device = self.get(path, expected_http_code=200, grpc_status=5)
        self.assertIsNone(device)

    def assert_no_device_present(self):
        path = '/api/v1/devices'
        devices = self.get(path)['items']
        self.assertEqual(devices, [])

    def assert_no_logical_device(self):
        path = '/api/v1/logical_devices'
        ld = self.get(path)['items']
        self.assertEqual(ld, [])

    def delete_device_incorrect_state(self, id):
        # deleting a device that is not disabled: FAILED_PRECONDITION (3)
        path = '/api/v1/devices/{}'.format(id)
        self.delete(path + '/delete', expected_http_code=200, grpc_status=3)

    def enable_unknown_device(self, id):
        # operating on an unknown id: NOT_FOUND (grpc_status=5)
        path = '/api/v1/devices/{}'.format(id)
        self.post(path + '/enable', expected_http_code=200, grpc_status=5)

    def disable_unknown_device(self, id):
        path = '/api/v1/devices/{}'.format(id)
        self.post(path + '/disable', expected_http_code=200, grpc_status=5)

    def delete_unknown_device(self, id):
        path = '/api/v1/devices/{}'.format(id)
        self.delete(path + '/delete', expected_http_code=200, grpc_status=5)
class TestColdActivationSequence(RestBase):
    """Walk a simulated OLT through the full cold activation sequence:
    pre-provision, enable, ONU discovery, EAPOL/DHCP/IGMP/unicast flow
    installation (the latter stages are placeholders for now)."""

    # Retrieve details of the REST entry point
    if orch_env == 'k8s-single-node':
        rest_endpoint = get_pod_ip('voltha') + ':8443'
    elif orch_env == 'swarm-single-node':
        rest_endpoint = 'localhost:8443'
    else:
        # default: resolve the endpoint via the local Consul agent
        rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL,
                                                 'voltha-envoy-8443')

    # Construct the base_url
    base_url = 'https://' + rest_endpoint
    log.debug('cold-activation-test', base_url=base_url)

    def wait_till(self, msg, predicate, interval=0.1, timeout=5.0):
        """Poll ``predicate`` every ``interval`` seconds until it returns
        truthy or ``timeout`` seconds elapse, then fail with ``msg``."""
        deadline = time() + timeout
        while time() < deadline:
            if predicate():
                return
            sleep(interval)
        self.fail('Timed out while waiting for condition: {}'.format(msg))

    def test_cold_activation_sequence(self):
        """Complex test-case to cover device activation sequence"""

        self.verify_prerequisites()
        olt_id = self.add_olt_device()
        self.verify_device_preprovisioned_state(olt_id)
        self.activate_device(olt_id)
        ldev_id = self.wait_for_logical_device(olt_id)
        onu_ids = self.wait_for_onu_discovery(olt_id)
        self.verify_logical_ports(ldev_id)
        self.simulate_eapol_flow_install(ldev_id, olt_id, onu_ids)
        self.verify_olt_eapol_flow(olt_id)
        self.verify_onu_forwarding_flows(onu_ids)
        self.simulate_eapol_start()
        self.simulate_eapol_request_identity()
        self.simulate_eapol_response_identity()
        self.simulate_eapol_request()
        self.simulate_eapol_response()
        self.simulate_eapol_success()
        self.install_and_verify_dhcp_flows()
        self.install_and_verify_igmp_flows()
        self.install_and_verifyunicast_flows()

    def verify_prerequisites(self):
        # all we care is that Voltha is available via REST using the base uri
        self.get('/api/v1')

    def add_olt_device(self):
        """Pre-provision a simulated OLT and return its device id."""
        device = Device(type='simulated_olt', mac_address='00:00:00:00:00:01')
        device = self.post('/api/v1/devices',
                           MessageToDict(device),
                           expected_http_code=200)
        return device['id']

    def verify_device_preprovisioned_state(self, olt_id):
        # we also check that so far what we read back is same as what we get
        # back on create
        device = self.get('/api/v1/devices/{}'.format(olt_id))
        self.assertNotEqual(device['id'], '')
        self.assertEqual(device['adapter'], 'simulated_olt')
        self.assertEqual(device['admin_state'], 'PREPROVISIONED')
        self.assertEqual(device['oper_status'], 'UNKNOWN')

    def activate_device(self, olt_id):
        """Enable the OLT and wait for ACTIVE/REACHABLE; also verify the
        software image version is populated and two ports exist."""
        path = '/api/v1/devices/{}'.format(olt_id)
        self.post(path + '/enable', expected_http_code=200)
        device = self.get(path)
        self.assertEqual(device['admin_state'], 'ENABLED')

        self.wait_till('admin state moves to ACTIVATING or ACTIVE',
                       lambda: self.get(path)['oper_status'] in
                       ('ACTIVATING', 'ACTIVE'),
                       timeout=0.5)

        # eventually, it shall move to active state and by then we shall have
        # device details filled, connect_state set, and device ports created
        self.wait_till('admin state ACTIVE',
                       lambda: self.get(path)['oper_status'] == 'ACTIVE',
                       timeout=0.5)
        device = self.get(path)
        images = device['images']
        image = images['image']
        image_1 = image[0]
        version = image_1['version']
        self.assertNotEqual(version, '')
        self.assertEqual(device['connect_status'], 'REACHABLE')

        ports = self.get(path + '/ports')['items']
        self.assertEqual(len(ports), 2)

    def wait_for_logical_device(self, olt_id):
        # we shall find the logical device id from the parent_id of the olt
        # (root) device
        device = self.get('/api/v1/devices/{}'.format(olt_id))
        self.assertNotEqual(device['parent_id'], '')
        logical_device = self.get('/api/v1/logical_devices/{}'.format(
            device['parent_id']))

        # the logical device shall be linked back to the hard device,
        # its ports too
        self.assertEqual(logical_device['root_device_id'], device['id'])

        logical_ports = self.get('/api/v1/logical_devices/{}/ports'.format(
            logical_device['id']))['items']
        self.assertGreaterEqual(len(logical_ports), 1)
        logical_port = logical_ports[0]
        self.assertEqual(logical_port['id'], 'nni')
        self.assertEqual(logical_port['ofp_port']['name'], 'nni')
        self.assertEqual(logical_port['ofp_port']['port_no'], 129)
        self.assertEqual(logical_port['device_id'], device['id'])
        self.assertEqual(logical_port['device_port_no'], 2)
        return logical_device['id']

    def wait_for_onu_discovery(self, olt_id):
        # shortly after we shall see the discovery of four new onus, linked to
        # the olt device
        def find_our_onus():
            devices = self.get('/api/v1/devices')['items']
            return [d for d in devices if d['parent_id'] == olt_id]

        self.wait_till('find ONUs linked to the olt device',
                       lambda: len(find_our_onus()) >= 1, 2)

        # verify that they are properly set
        onus = find_our_onus()
        for onu in onus:
            self.assertEqual(onu['admin_state'], 'ENABLED')
            self.assertEqual(onu['oper_status'], 'ACTIVE')

        return [onu['id'] for onu in onus]

    def verify_logical_ports(self, ldev_id):
        """Check the logical device has at least 5 ports, all LIVE."""

        # at this point we shall see at least 5 logical ports on the
        # logical device
        logical_ports = self.get(
            '/api/v1/logical_devices/{}/ports'.format(ldev_id))['items']
        self.assertGreaterEqual(len(logical_ports), 5)

        # verify that all logical ports are LIVE (state=4)
        for lport in logical_ports:
            self.assertEqual(lport['ofp_port']['state'], 4)

    def simulate_eapol_flow_install(self, ldev_id, olt_id, onu_ids):
        """Install one EAPOL trap-to-controller flow per ONU, as an SDN
        controller would."""

        # emulate the flow mod requests that shall arrive from the SDN
        # controller, one for each ONU
        lports = self.get(
            '/api/v1/logical_devices/{}/ports'.format(ldev_id))['items']

        # device_id -> logical port map, which we will use to construct
        # our flows
        lport_map = dict((lp['device_id'], lp) for lp in lports)
        for onu_id in onu_ids:
            # if eth_type == 0x888e => send to controller
            _in_port = lport_map[onu_id]['ofp_port']['port_no']
            req = ofp.FlowTableUpdate(
                id=ldev_id,
                flow_mod=mk_simple_flow_mod(
                    match_fields=[
                        in_port(_in_port),
                        vlan_vid(ofp.OFPVID_PRESENT | 0),
                        eth_type(0x888e)
                    ],
                    actions=[output(ofp.OFPP_CONTROLLER)],
                    priority=1000))
            res = self.post('/api/v1/logical_devices/{}/flows'.format(ldev_id),
                            MessageToDict(req,
                                          preserving_proto_field_name=True),
                            expected_http_code=200)

        # for sanity, verify that flows are in flow table of logical device
        flows = self.get(
            '/api/v1/logical_devices/{}/flows'.format(ldev_id))['items']
        self.assertGreaterEqual(len(flows), 4)

    def verify_olt_eapol_flow(self, olt_id):
        """Spot-check the flows pushed down to the physical OLT."""
        flows = self.get('/api/v1/devices/{}/flows'.format(olt_id))['items']
        self.assertEqual(len(flows), 8)
        flow = flows[1]
        self.assertEqual(flow['table_id'], 0)
        self.assertEqual(flow['priority'], 1000)

        # TODO refine this
        # self.assertEqual(flow['match'], {})
        # self.assertEqual(flow['instructions'], [])

    # The remaining steps of the activation sequence are placeholders,
    # to be filled in as the simulated adapter grows the capabilities.

    def verify_onu_forwarding_flows(self, onu_ids):
        pass

    def simulate_eapol_start(self):
        pass

    def simulate_eapol_request_identity(self):
        pass

    def simulate_eapol_response_identity(self):
        pass

    def simulate_eapol_request(self):
        pass

    def simulate_eapol_response(self):
        pass

    def simulate_eapol_success(self):
        pass

    def install_and_verify_dhcp_flows(self):
        pass

    def install_and_verify_igmp_flows(self):
        pass

    # NOTE(review): name is missing an underscore (verify_unicast);
    # kept as-is because the test sequence above calls it by this name.
    def install_and_verifyunicast_flows(self):
        pass
# Esempio n. 33
# 0
 def set_kafka_endpoint(self):
     """Resolve the Kafka bootstrap endpoint via Consul and cache it."""
     endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'kafka')
     self.kafka_endpoint = endpoint
# Esempio n. 34
# 0
 def set_kafka_endpoint(self):
     # Resolve the Kafka bootstrap endpoint via the local Consul agent
     # and cache it on the instance for later consumer/producer setup.
     self.kafka_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'kafka')
class VolthaDeviceManagementRetrieveSoftwareInfo(RestBase):
    """Retrieve and validate software-image information for a device.

    Creates a simulated OLT over the local REST API, enables it, waits
    for it to become ACTIVE, then fetches its software images and
    validates them against IMAGES_SCHEMA.
    """

    # Retrieve details on the REST entry point
    rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'chameleon-rest')

    # Construct the base_url
    base_url = 'https://' + rest_endpoint

    def wait_till(self, msg, predicate, interval=0.1, timeout=5.0):
        """Poll predicate() every `interval` seconds until it is truthy.

        Fails the test with `msg` if `timeout` seconds elapse first.
        """
        deadline = time() + timeout
        while time() < deadline:
            if predicate():
                return
            sleep(interval)
        self.fail('Timed out while waiting for condition: {}'.format(msg))

    # ~~~~~~~~~~~~ Tests ~~~~~~~~~~~~
    def test_01_voltha_device_management_retrieve_images(self):
        # Make sure the Voltha REST interface is available
        self.verify_rest()

        # Create a new device
        device = self.add_device()

        # Activate the new device
        self.activate_device(device['id'])

        # Wait till device moves to ACTIVE state.
        # BUG FIX: the membership test previously read `in ('ACTIVE')`,
        # which is a substring check against the *string* 'ACTIVE'
        # (parentheses do not make a tuple); use a one-element tuple.
        self.wait_till('admin state moves from ACTIVATING to ACTIVE',
                       lambda: self.get('/api/v1/devices/{}'.format(device[
                           'id']))['oper_status'] in ('ACTIVE',),
                       timeout=5.0)

        # Give some time before ONUs are detected
        sleep(2.0)

        # Retrieve the images for the device
        images = self.get_images(device['id'])

        # Validate the schema for the software info
        self.validate_images_schema(images)

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def verify_rest(self):
        """Make sure the Voltha REST interface is reachable."""
        self.get('/api/v1')

    # Create a new simulated device
    def add_device(self):
        device = Device(type='simulated_olt', )
        device = self.post('/api/v1/local/devices',
                           MessageToDict(device),
                           expected_code=200)
        return device

    # Activate the simulated device.
    def activate_device(self, device_id):
        path = '/api/v1/local/devices/{}'.format(device_id)
        self.post(path + '/enable', expected_code=200)
        device = self.get(path)
        self.assertEqual(device['admin_state'], 'ENABLED')

    # Retrieve software info on the device
    def get_images(self, device_id):
        path = '/api/v1/local/devices/{}/images'.format(device_id)
        images = self.get(path)
        return images

    def validate_images_schema(self, images):
        """Validate `images` against IMAGES_SCHEMA; fail the test on error."""
        try:
            jsonschema.validate(images, IMAGES_SCHEMA)
        except Exception as e:
            # str(e) instead of the deprecated Python 2 e.message attribute
            self.assertTrue(
                False, 'Validation failed for images: {}'.format(str(e)))
Esempio n. 36
0
    port = options.graphite_port

    # Connect to Graphite
    try:
        graphite = Graphite(host, port)
    except socket.error, e:
        print "Could not connect to graphite host %s:%s" % (host, port)
        sys.exit(1)
    except socket.gaierror, e:
        print "Invalid hostname for graphite host %s" % (host)
        sys.exit(1)
    log.info('Connected to graphite at {}:{}'.format(host, port))

    # Resolve Kafka value if it is based on consul lookup
    if kafka.startswith('@'):
        kafka = get_endpoint_from_consul(consul, kafka[1:])

    # Connect to Kafka
    try:
        log.info('connect-to-kafka', kafka=kafka)
        consumer = KafkaConsumer(topic, bootstrap_servers=kafka)
    except KafkaError, e:
        log.error('failed-to-connect-to-kafka', kafka=kafka, e=e)
        sys.exit(1)

    # Consume Kafka topic
    log.info('start-loop', topic=topic)
    for record in consumer:
        assert isinstance(record, ConsumerRecord)
        msg = record.value
Esempio n. 37
0
class TestDeviceStateChangeSequence(RestBase):
    """
    The prerequisite for this test are:
     1. voltha ensemble is running
          docker-compose -f compose/docker-compose-system-test.yml up -d
     2. ponsim olt is running with 1 OLT and 4 ONUs using device_type 'bal'
          sudo -s
          . ./env.sh
          ./ponsim/main.py -v -o 4 -d bal
    """

    # Retrieve details of the REST entry point
    rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'envoy-8443')

    # Construct the base_url
    base_url = 'https://' + rest_endpoint

    def wait_till(self, msg, predicate, interval=0.1, timeout=5.0):
        """Poll predicate() every `interval` seconds until it is truthy.

        Fails the test with `msg` if `timeout` seconds elapse first.
        """
        deadline = time() + timeout
        while time() < deadline:
            if predicate():
                return
            sleep(interval)
        self.fail('Timed out while waiting for condition: {}'.format(msg))

    def test_device_state_changes_scenarios(self):
        """Entry point: verify REST availability, then run the basic scenario."""
        self.verify_prerequisites()
        # Test basic scenario

        self.basic_scenario()

    def basic_scenario(self):
        """
        Test the enable -> disable -> enable -> disable -> delete for OLT
        """
        self.assert_no_device_present()
        olt_id = self.add_olt_device()
        self.verify_device_preprovisioned_state(olt_id)
        self.enable_device(olt_id)
        ldev_id = self.wait_for_logical_device(olt_id)
        self.verify_logical_ports(ldev_id, 1)
        olt_ids, _ = self.get_devices()
        self.disable_device(olt_ids[0])
        self.assert_no_logical_device()
        self.delete_device(olt_ids[0])
        self.assert_no_device_present()

    def verify_prerequisites(self):
        # all we care is that Voltha is available via REST using the base uri
        self.get('/api/v1')

    def get_devices(self):
        """Return (olt_ids, onu_ids) of known devices.

        Only 'asfvolt16_olt' devices are collected here, so onu_ids is
        always returned empty by this implementation.
        """
        devices = self.get('/api/v1/devices')['items']
        olt_ids = []
        onu_ids = []
        for d in devices:
            if d['adapter'] == 'asfvolt16_olt':
                olt_ids.append(d['id'])
        return olt_ids, onu_ids

    def add_olt_device(self):
        """Preprovision an asfvolt16 OLT device and return its id."""
        device = Device(type='asfvolt16_olt', host_and_port='172.17.0.1:50060')
        device = self.post('/api/v1/devices',
                           MessageToDict(device),
                           expected_http_code=200)
        return device['id']

    def verify_device_preprovisioned_state(self, olt_id):
        # we also check that so far what we read back is same as what we get
        # back on create
        device = self.get('/api/v1/devices/{}'.format(olt_id))
        self.assertNotEqual(device['id'], '')
        self.assertEqual(device['adapter'], 'asfvolt16_olt')
        self.assertEqual(device['admin_state'], 'PREPROVISIONED')
        self.assertEqual(device['oper_status'], 'UNKNOWN')

    def enable_device(self, olt_id):
        """Enable the OLT and verify it starts activating and exposes a port."""
        path = '/api/v1/devices/{}'.format(olt_id)
        self.post(path + '/enable', expected_http_code=200)
        device = self.get(path)
        self.assertEqual(device['admin_state'], 'ENABLED')

        self.wait_till('admin state moves to ACTIVATING or ACTIVE',
                       lambda: self.get(path)['oper_status'] in
                       ('ACTIVATING', 'ACTIVE'),
                       timeout=0.5)

        # eventually, it shall move to active state and by then we shall have
        # device details filled, connect_state set, and device ports created
        '''
        # The check for ACTIVE is suppressed since the indications
        # portion of the code is not yet ready.
        self.wait_till(
            'admin state ACTIVE',
            lambda: self.get(path)['oper_status'] == 'ACTIVE',
            timeout=0.5)
        device = self.get(path)
        '''
        # NOTE(review): this asserts on the snapshot fetched *before* the
        # wait above (the re-fetch lives in the suppressed block), so it
        # assumes /enable sets connect_status synchronously — confirm.
        self.assertEqual(device['connect_status'], 'REACHABLE')

        ports = self.get(path + '/ports')['items']
        #self.assertEqual(len(ports), 2)
        self.assertEqual(len(ports), 1)

    def wait_for_logical_device(self, olt_id):
        """Locate the logical device spawned for the OLT and verify its NNI port."""
        # we shall find the logical device id from the parent_id of the olt
        # (root) device
        device = self.get('/api/v1/devices/{}'.format(olt_id))
        self.assertNotEqual(device['parent_id'], '')
        logical_device = self.get('/api/v1/logical_devices/{}'.format(
            device['parent_id']))

        # the logical device shall be linked back to the hard device,
        # its ports too
        self.assertEqual(logical_device['root_device_id'], device['id'])

        logical_ports = self.get('/api/v1/logical_devices/{}/ports'.format(
            logical_device['id']))['items']
        self.assertGreaterEqual(len(logical_ports), 1)
        logical_port = logical_ports[0]
        self.assertEqual(logical_port['id'], 'nni')
        self.assertEqual(logical_port['ofp_port']['name'], 'nni')
        self.assertEqual(logical_port['ofp_port']['port_no'], 0)
        self.assertEqual(logical_port['device_id'], device['id'])
        self.assertEqual(logical_port['device_port_no'], 50)
        return logical_device['id']

    def verify_logical_ports(self, ldev_id, num_ports):
        """Check at least num_ports LIVE ports exist on the logical device."""
        # at this point we shall see num_ports logical ports on the
        # logical device
        logical_ports = self.get(
            '/api/v1/logical_devices/{}/ports'.format(ldev_id))['items']
        self.assertGreaterEqual(len(logical_ports), num_ports)

        # verify that all logical ports are LIVE (state=4)
        for lport in logical_ports:
            self.assertEqual(lport['ofp_port']['state'], 4)

    def disable_device(self, id):
        """Disable the device and verify it winds down to DISABLED/UNREACHABLE."""
        path = '/api/v1/devices/{}'.format(id)
        self.post(path + '/disable', expected_http_code=200)
        device = self.get(path)
        self.assertEqual(device['admin_state'], 'DISABLED')

        self.wait_till('operational state moves to UNKNOWN',
                       lambda: self.get(path)['oper_status'] == 'UNKNOWN',
                       timeout=0.5)

        # eventually, the connect_state should be UNREACHABLE
        self.wait_till(
            'connest status UNREACHABLE',
            lambda: self.get(path)['connect_status'] == 'UNREACHABLE',
            timeout=0.5)

        # Device's ports should be INACTIVE
        ports = self.get(path + '/ports')['items']
        #self.assertEqual(len(ports), 2)
        self.assertEqual(len(ports), 1)
        for p in ports:
            self.assertEqual(p['admin_state'], 'DISABLED')
            self.assertEqual(p['oper_status'], 'UNKNOWN')

    def delete_device(self, id):
        """Delete the device; the follow-up GET must miss (gRPC status 5 = NOT_FOUND)."""
        path = '/api/v1/devices/{}'.format(id)
        self.delete(path + '/delete', expected_http_code=200)
        device = self.get(path, expected_http_code=200, grpc_status=5)
        self.assertIsNone(device)

    def assert_no_device_present(self):
        """The device list must be empty."""
        path = '/api/v1/devices'
        devices = self.get(path)['items']
        self.assertEqual(devices, [])

    def assert_no_logical_device(self):
        """The logical-device list must be empty."""
        path = '/api/v1/logical_devices'
        ld = self.get(path)['items']
        self.assertEqual(ld, [])
Esempio n. 38
0
class VolthaAlarmEventTests(RestBase):
    # Retrieve details on the REST entry point
    rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'envoy-8443')

    # Construct the base_url
    base_url = 'https://' + rest_endpoint

    # Start by querying consul to get the endpoint details
    kafka_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'kafka')

    # ~~~~~~~~~~~~ Tests ~~~~~~~~~~~~

    def test_1_alarm_topic_exists(self):
        # Produce a message to ensure that the topic exists
        cmd = COMMANDS['kafka_client_send_msg'].format(self.kafka_endpoint)
        run_long_running_command_with_timeout(cmd, 5)

        # We want to make sure that the topic is available on the system
        expected_pattern = ['voltha.alarms']

        # Start the kafka client to retrieve details on topics
        cmd = COMMANDS['kafka_client_run'].format(self.kafka_endpoint)
        kafka_client_output = run_long_running_command_with_timeout(cmd, 20)

        # Loop through the kafka client output to find the topic
        found = False
        for out in kafka_client_output:
            if all(ep in out for ep in expected_pattern):
                found = True
                break

        self.assertTrue(found,
                        'Failed to find topic {}'.format(expected_pattern))

    def test_2_alarm_generated_by_adapter(self):
        # Verify that REST calls can be made
        self.verify_rest()

        # Create a new device
        device = self.add_device()

        # Activate the new device
        self.activate_device(device['id'])

        # The simulated olt device should start generating alarms periodically
        alarm = self.get_alarm_event(device['id'])

        # Make sure that the schema is valid
        self.validate_alarm_event_schema(alarm)

        # Validate the constructed alarm id
        self.verify_alarm_event_id(device['id'], alarm['id'])

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    # Make sure the Voltha REST interface is available
    def verify_rest(self):
        self.get('/api/v1')

    # Create a new simulated device
    def add_device(self):
        device = Device(type='simulated_olt', )
        device = self.post('/api/v1/devices',
                           MessageToDict(device),
                           expected_http_code=200)
        return device

    # Active the simulated device.
    # This will trigger the simulation of random alarms
    def activate_device(self, device_id):
        path = '/api/v1/devices/{}'.format(device_id)
        self.post(path + '/enable', expected_http_code=200)
        device = self.get(path)
        self.assertEqual(device['admin_state'], 'ENABLED')

    # Retrieve a sample alarm for a specific device
    def get_alarm_event(self, device_id):
        cmd = COMMANDS['kafka_client_alarm_check'].format(self.kafka_endpoint)
        kafka_client_output = run_long_running_command_with_timeout(cmd, 30)

        # Verify the kafka client output
        found = False
        alarm_data = None

        for out in kafka_client_output:
            # Catch any error that might occur while reading the kafka messages
            try:
                alarm_data = simplejson.loads(out)
                print alarm_data

                if not alarm_data or 'resource_id' not in alarm_data:
                    continue
                elif alarm_data['resource_id'] == device_id:
                    found = True
                    break

            except Exception as e:
                continue

        self.assertTrue(
            found,
            'Failed to find kafka alarm with device id:{}'.format(device_id))

        return alarm_data

    # Verify that the alarm follows the proper schema structure
    def validate_alarm_event_schema(self, alarm):
        try:
            jsonschema.validate(alarm, ALARM_SCHEMA)
        except Exception as e:
            self.assertTrue(
                False, 'Validation failed for alarm : {}'.format(e.message))

    # Verify that alarm identifier based on the format generated by default.
    def verify_alarm_event_id(self, device_id, alarm_id):
        prefix = re.findall(r"(voltha)\.(\w+)\.(\w+)", alarm_id)

        self.assertEqual(len(prefix), 1,
                         'Failed to parse the alarm id: {}'.format(alarm_id))
        self.assertEqual(
            len(prefix[0]), 3,
            'Expected id format: voltha.<adapter name>.<device id>')
        self.assertEqual(
            prefix[0][0], 'voltha',
            'Expected id format: voltha.<adapter name>.<device id>')
        self.assertEqual(
            prefix[0][1], 'simulated_olt',
            'Expected id format: voltha.<adapter name>.<device id>')
        self.assertEqual(
            prefix[0][2], device_id,
            'Expected id format: voltha.<adapter name>.<device id>')
Esempio n. 39
0
            "aes_indicator": True,
            "name": "GEMPORT 2",
            "traffic_class": 0,
            "itf_ref": "Enet UNI 1",
            "tcont_ref": "TCont 2",
            "gemport_id": 0
            }
        }
    }
]

#for ordering the test cases
id = 3  # NOTE(review): shadows builtin id(); presumably read by the test-ordering machinery, so left unrenamed — confirm
LOCAL_CONSUL = "localhost:8500"  # default local Consul agent address
# Retrieve details of the REST entry point
rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'envoy-8443')
# Construct the base_url
BASE_URL = 'https://' + rest_endpoint

class GlobalPreChecks(RestBase):
    """Sanity checks run before the main test suite."""

    base_url = BASE_URL

    # def test_000_get_root(self):
    #     res = self.get('/#!/', expected_content_type='text/html')
    #     self.assertGreaterEqual(res.find('swagger'), 0)

    def test_001_get_health(self):
        """The /health endpoint must report a HEALTHY state."""
        health = self.get('/health')
        self.assertEqual(health['state'], 'HEALTHY')

class TestXPon(RestBase):