Example #1
    def reboot(self):
        self.log.info('rebooting', device_id=self.device_id)

        # Update the operational status to ACTIVATING and connect status to
        # UNREACHABLE
        device = self.adapter_agent.get_device(self.device_id)
        previous_oper_status = device.oper_status
        previous_conn_status = device.connect_status
        device.oper_status = OperStatus.ACTIVATING
        device.connect_status = ConnectStatus.UNREACHABLE
        self.adapter_agent.update_device(device)

        # Sleep 10 secs, simulating a reboot
        # TODO: send alert and clear alert after the reboot
        yield asleep(10)

        # Change the operational status back to its previous state.  With a
        # real OLT the operational state should be the state the device is
        # after a reboot.
        # Get the latest device reference
        device = self.adapter_agent.get_device(self.device_id)
        device.oper_status = previous_oper_status
        device.connect_status = previous_conn_status
        self.adapter_agent.update_device(device)
        self.log.info('rebooted', device_id=self.device_id)
Example #2
    def send_packet_stream(self, stub, interval):
        queue = Queue()

        @inlineCallbacks
        def get_next_from_queue():
            packet = yield queue.get()
            returnValue(packet)

        def packet_generator():
            while 1:
                packet = queue.get(block=True)
                yield packet

        def stream(stub):
            """This is executed on its own thread"""
            generator = packet_generator()
            result = stub.SendPackets(generator)
            print 'Got this after sending packets:', result, type(result)
            return result

        reactor.callInThread(stream, stub)

        while 1:
            len = queue.qsize()
            if len < 100:
                packet = Packet(source=42, content='beefstew')
                queue.put(packet)
            yield asleep(interval)
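All of these snippets pause without blocking the Twisted reactor by yielding asleep(...) from generator methods that are normally decorated with @inlineCallbacks. For orientation, here is a minimal sketch of what such a Deferred-returning sleep helper looks like; the projects above import theirs from common.utils.asleep, and this sketch is only an assumed approximation of its shape, not that module's verbatim code:

from twisted.internet import reactor
from twisted.internet.defer import Deferred

def asleep(dt):
    """Non-blocking sleep: returns a Deferred that fires with None
    after dt seconds, leaving the reactor free to run other work."""
    d = Deferred()
    reactor.callLater(dt, d.callback, None)
    return d

Yielding the Deferred returned by asleep inside an @inlineCallbacks generator is what lets the loops above wait between retries or polls while other reactor tasks keep running.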
Example #3
    def keep_connected(self):
        """Keep reconnecting to the controller"""
        while not self.exiting:
            host, port = self.resolve_endpoint(self.controller_endpoint)
            log.info('connecting', host=host, port=port)
            if self.enable_tls:
                try:
                    # Check that key_file and cert_file is provided and
                    # the files exist
                    if self.key_file is None or             \
                       self.cert_file is None or            \
                       not os.path.isfile(self.key_file) or \
                       not os.path.isfile(self.cert_file):
                        raise Exception('key_file "{}" or cert_file "{}"'
                                        ' is not found'.
                                         format(self.key_file, self.cert_file))
                    with open(self.key_file) as keyFile:
                        with open(self.cert_file) as certFile:
                            clientCert = ssl.PrivateCertificate.loadPEM(
                                keyFile.read() + certFile.read())

                    ctx = clientCert.options()
                    self.connector = reactor.connectSSL(host, port, self, ctx)
                    log.info('tls-enabled')

                except Exception as e:
                    log.exception('failed-to-connect', reason=e)
            else:
                self.connector = reactor.connectTCP(host, port, self)
                log.info('tls-disabled')

            self.d_disconnected = Deferred()
            yield self.d_disconnected
            log.debug('reconnect', after_delay=self.retry_interval)
            yield asleep(self.retry_interval)
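keep_connected() parks on self.d_disconnected until the connection drops and only then loops around to reconnect. That Deferred has to be fired from the factory's connection-lost hooks; below is a plausible sketch of the wiring, assuming the surrounding class is a Twisted ClientFactory (the class name and hook bodies are illustrative, not the project's actual code):

from twisted.internet.protocol import ClientFactory

class ControllerConnection(ClientFactory):
    # keep_connected() and the other agent methods are assumed to live here

    def clientConnectionFailed(self, connector, reason):
        # Wake keep_connected() so it can retry after retry_interval
        d = getattr(self, 'd_disconnected', None)
        if d is not None and not d.called:
            d.callback(None)

    def clientConnectionLost(self, connector, reason):
        # An established connection dropping is handled the same way
        self.clientConnectionFailed(connector, reason)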
Example #4
    def monitor_logical_devices(self):
        log.debug('start-monitor-logical-devices')

        while self.running:
            log.info('monitoring-logical-devices')

            # should change to a gRPC streaming call
            # see https://jira.opencord.org/browse/CORD-821

            try:
                if self.channel is not None and self.grpc_client is not None:
                    # get current list from Voltha
                    devices = yield self.get_list_of_logical_devices_from_voltha()

                    # update agent list and mapping tables as needed
                    self.refresh_agent_connections(devices)
                else:
                    log.info('vcore-communication-unavailable')

                # wait before next poll
                yield asleep(self.devices_refresh_interval)

            except _Rendezvous, e:
                log.error('vcore-communication-failure', exception=repr(e), status=e.code())

            except Exception as e:
                log.exception('unexpected-vcore-communication-failure', exception=repr(e))
Example #5
 def echo_loop(self, stub, prefix='', interval=1.0):
     """Send an echo message and print its return value"""
     seq = 0
     while 1:
         msg = 'ECHO%05d' % seq
         pr('{}sending echo {}'.format(prefix, msg))
         request = Echo(msg=msg, delay=interval)
         response = yield threads.deferToThread(stub.GetEcho, request)
         pr('{}    got echo {}'.format(prefix, response.msg))
         seq += 1
         yield asleep(interval)
Example #6
    def _track_my_assignments(self, index):
        try:
            # if there is no leader yet, wait for a stable leader
            d = self.coord.wait_for_a_leader()
            if not d.called:
                yield d
                # additional time to let leader update
                # assignments, to minimize potential churn
                yield asleep(self.coord.worker_config.get(
                    self.coord.worker_config['time_to_let_leader_update'], 5))

            (index, results) = yield self.coord.kv_get(
                self.coord.assignment_prefix + self.instance_id,
                index=index, recurse=True)

            # 1. Check whether we have been assigned a full voltha instance
            if results and not self.mycore_store_id:
                # We have no store id set yet
                core_stores = [c['Value'] for c in results if
                               c['Key'] == self.coord.assignment_prefix +
                               self.instance_id + '/' +
                               self.coord.core_storage_suffix and c['Value']]
                if core_stores:
                    self.mycore_store_id = core_stores[0]
                    log.debug('store-assigned',
                              mycore_store_id=self.mycore_store_id)
                    self._stash_and_restart_core_store_soak_timer()

            # 2.  Check whether we have been assigned a work item
            if results and self.mycore_store_id:
                # Check for difference between current worload and newer one
                # TODO: Depending on how workload gets load balanced we may
                # need to add workload distribution here
                pass

        except Exception, e:
            log.exception('assignments-track-error', e=e)
            yield asleep(
                self.coord.worker_config.get(
                    self.coord.worker_config[
                        'assignments_track_error_to_avoid_flood'], 1))
Example #7
    def make_veth_pairs_if_needed(self):
        def has_iface(iface):
            return os.system('ip link show {}'.format(iface)) == 0

        def make_veth(iface):
            os.system('ip link add type veth')
            os.system('ip link set {} up'.format(iface))
            peer = iface[:len('veth')] + str(int(iface[len('veth'):]) + 1)
            os.system('ip link set {} up'.format(peer))
            assert has_iface(iface)

        for iface_number in (0, 2):
            iface = 'veth{}'.format(iface_number)
            if not has_iface(iface):
                make_veth(iface)
                yield asleep(2)
Example #8
    def unlock(self, source):
        """
        Get the requested data from the server
        :param rpc_string: RPC request
        :param source: is the name of the configuration datastore accessed
        :return: (defeered) for RpcReply
        """
        if source not in self._locked:
            self._locked[source] = None

        if self._locked[source] is not None:
            yield asleep(random.uniform(0.1,
                                        0.5))  # Simulate NETCONF request delay

        self._locked[source] = None
        returnValue(RPCReply(_dummy_xml))
Example #9
 def _simulate_detection_of_onus(self, device_id):
     try:
         for i in xrange(1, 5):
             log.info('activate-olt-for-onu-{}'.format(i))
             vlan_id = self._olt_side_onu_activation(i)
             yield asleep(0.05)
             self.adapter_agent.child_device_detected(
                 parent_device_id=device_id,
                 parent_port_no=1,
                 child_device_type='simulated_onu',
                 proxy_address=Device.ProxyAddress(device_id=device_id,
                                                   channel_id=vlan_id),
                 admin_state=AdminState.ENABLED,
                 vlan=vlan_id)
     except Exception as e:
         log.exception('error', e=e)
Example #10
    def monitor_vcore_grpc_channel(self):
        log.debug('start-monitor-vcore-grpc-channel')

        while self.running:
            try:
                # If a subscription is not yet assigned then establish new GRPC connection
                # ... otherwise keep using existing connection details
                if self.subscription is None:
                    self._assign_grpc_attributes()

                # Send subscription request to register the current ofagent instance
                container_name = self.instance_id
                if self.grpc_client is None:
                    self.grpc_client = GrpcClient(self, self.channel,
                                                  self.grpc_timeout)
                subscription = yield self.grpc_client.subscribe(
                    OfAgentSubscriber(ofagent_id=container_name))

                # If the subscriber id matches the current instance
                # ... then the subscription has succeeded
                if subscription is not None and subscription.ofagent_id == container_name:
                    if self.subscription is None:
                        # Keep details on the current GRPC session and subscription
                        log.debug('subscription-with-vcore-successful',
                                  subscription=subscription)
                        self.subscription = subscription
                        self.grpc_client.start()

                    # Sleep a bit in between each subscribe
                    yield asleep(self.subscription_refresh_interval)

                    # Move on to next subscribe request
                    continue

                # The subscription did not succeed, reset and move on
                else:
                    log.info('subscription-with-vcore-unavailable',
                             subscription=subscription)

            except _Rendezvous, e:
                log.error('subscription-with-vcore-terminated',
                          exception=e,
                          status=e.code())

            except Exception as e:
                log.exception('unexpected-subscription-termination-with-vcore',
                              e=e)
Example #11
    def snapshot_mib(self):
        """
        Snapshot the MIB on the ONU and create a copy of our local MIB database

        :return: (pair) (db_copy, number_of_commands)
        """
        db_copy = None
        number_of_commands = None

        try:
            max_tries = MibResyncTask.max_db_copy_retries - 1

            for retries in xrange(0, max_tries + 1):
                # Send MIB Upload so ONU snapshots its MIB
                try:
                    number_of_commands = yield self.send_mib_upload()
                    self.stop_if_not_running()

                    if number_of_commands is None:
                        if retries >= max_tries:
                            db_copy = None
                            break

                except TimeoutError as e:
                    self.log.warn('timeout', e=e)
                    if retries >= max_tries:
                        raise

                    yield asleep(MibResyncTask.db_copy_retry_delay)
                    self.stop_if_not_running()
                    continue

                # Get a snapshot of the local MIB database
                db_copy = self._device.query_mib()

        except Exception as e:
            self.log.exception('mib-resync', e=e)
            raise

        # Handle initial failures

        if db_copy is None or number_of_commands is None:
            raise MibCopyException(
                'Failed to snapshot MIB copy after {} retries'.format(
                    MibResyncTask.max_db_copy_retries))

        returnValue((db_copy, number_of_commands))
Example #12
    def make_veth_pairs_if_needed(self):

        def has_iface(iface):
            return os.system('ip link show {}'.format(iface)) == 0

        def make_veth(iface):
            os.system('ip link add type veth')
            os.system('ip link set {} up'.format(iface))
            peer = iface[:len('veth')] + str(int(iface[len('veth'):]) + 1)
            os.system('ip link set {} up'.format(peer))
            assert has_iface(iface)

        for iface_number in (0, 2):
            iface = 'veth{}'.format(iface_number)
            if not has_iface(iface):
                make_veth(iface)
                yield asleep(2)
Example #13
    def test_in_sync_with_software_values(self):
        self.setup_one_of_each()
        self.assertEqual(len(self.omci_agent.device_ids()), 1)

        sw_entity = 0x200
        sw_version = 'Beta-0.0.2'
        sw_hash = md5("just_a_test").hexdigest()
        prod_code = 'MySoftware'
        sw_active = True
        sw_committed = True
        sw_valid = True

        def stuff_db(_results):
            self._stuff_database([(SoftwareImage.class_id, sw_entity, {
                'version': sw_version,
                'is_committed': sw_committed,
                'is_active': sw_active,
                'is_valid': sw_valid,
                'product_code': prod_code,
                'image_hash': sw_hash,
            })])

        def do_my_tests(_results):
            config = self.onu_device.configuration

            images = config.software_images
            self.assertTrue(isinstance(images, list))
            self.assertEqual(len(images), 1)
            self.assertEqual(
                images[0].name,
                'running-revision' if sw_active else 'candidate-revision')
            self.assertEqual(images[0].version, sw_version)
            self.assertEqual(images[0].is_active, 1 if sw_active else 0)
            self.assertEqual(images[0].is_committed, 1 if sw_committed else 0)
            self.assertEqual(images[0].is_valid, 1 if sw_valid else 0)
            self.assertEqual(images[0].hash, sw_hash)

        # No capabilities available until started
        self.assertIsNone(self.onu_device.configuration)

        # Yield context so that MIB Database callLater runs.
        self.onu_device.start()
        d = asleep(0.2)
        d.addCallbacks(stuff_db, self.not_called)
        d.addCallbacks(do_my_tests, self.not_called)
        return d
Example #14
    def test_mib_query_ok_if_dev_started(self):
        self.setup_one_of_each()

        onu_device = self.agent.add_device(DEFAULT_ONU_DEVICE_ID,
                                           self.adapter_agent)
        self.assertIsNotNone(onu_device)
        self.assertEqual(len(self.agent.device_ids()), 1)
        self.assertEqual(self.agent.get_device(DEFAULT_ONU_DEVICE_ID),
                         onu_device)

        def not_called(_reason):
            onu_device.stop()
            assert False, 'Should never be called'

        def check_status(_results):
            # Device started. Query will succeed but nothing should be populated
            # but the most basic items

            results = onu_device.query_mib()
            self.assertTrue(isinstance(results, dict))
            self.assertEqual(results.get(DEVICE_ID_KEY), DEFAULT_ONU_DEVICE_ID)

            self.assertIsNotNone(results.get(VERSION_KEY))
            self.assertIsNotNone(results.get(CREATED_KEY))
            self.assertIsNone(
                results.get(MODIFIED_KEY))  # Created! but not yet modified

            self.assertEqual(results.get(MDS_KEY), 0)
            self.assertIsNone(results.get(LAST_SYNC_KEY))

            self.assertIsNone(results.get(CLASS_ID_KEY))

            # Stopping still allows a query.  Note you just delete a device
            # to clean up any associated databases
            onu_device.stop()
            results = onu_device.query_mib()
            self.assertTrue(isinstance(results, dict))

        # Yield context so that MIB Database callLater runs. This is a waiting
        # Async task from when the OpenOMCIAgent was started. But also start the
        # device so that it's queued async state machines can run as well
        onu_device.start()
        d = asleep(0.2)
        d.addCallbacks(check_status, not_called)

        return d
Example #15
    def get_list_of_logical_devices_from_voltha(self):

        while True:
            log.info('Retrieve devices from voltha')
            try:
                stub = voltha_pb2.VolthaLocalServiceStub(self.channel)
                devices = stub.ListLogicalDevices(Empty()).items
                for device in devices:
                    log.info("Devices {} -> {}".format(device.id,
                                                       device.datapath_id))
                returnValue(devices)

            except Exception as e:
                log.error('Failure to retrieve devices from '
                          'voltha: {}'.format(repr(e)))

            log.info('reconnect', after_delay=self.voltha_retry_interval)
            yield asleep(self.voltha_retry_interval)
Example #16
    def upload_mib(self, number_of_commands):
        ########################################
        # Begin MIB Upload
        seq_no = None

        for seq_no in xrange(number_of_commands):
            max_tries = MibResyncTask.max_mib_upload_next_retries

            for retries in xrange(0, max_tries):
                try:
                    response = yield self._device.omci_cc.send_mib_upload_next(seq_no)

                    omci_msg = response.fields['omci_message'].fields
                    class_id = omci_msg['object_entity_class']
                    entity_id = omci_msg['object_entity_id']

                    # Filter out the 'mib_data_sync' from the database. We save that at
                    # the device level and do not want it showing up during a re-sync
                    # during data comparison

                    if class_id == OntData.class_id:
                        break

                    attributes = {k: v for k, v in omci_msg['object_data'].items()}

                    # Save to the database
                    self._db_active.set(self.device_id, class_id, entity_id, attributes)
                    break

                except TimeoutError:
                    self.log.warn('mib-resync-timeout', seq_no=seq_no,
                                  number_of_commands=number_of_commands)

                    if retries < max_tries - 1:
                        yield asleep(MibResyncTask.mib_upload_next_delay)
                    else:
                        raise

                except Exception as e:
                    self.log.exception('resync', e=e, seq_no=seq_no,
                                       number_of_commands=number_of_commands)

        returnValue(seq_no + 1)     # seq_no is zero based.
Example #17
    def test_in_sync_with_cardholder_values(self):
        self.setup_one_of_each()
        self.assertEqual(len(self.omci_agent.device_ids()), 1)

        ch_entity = 0x102
        unit_type = 255
        clie_code = 'abc123'
        prot_ptr = 0

        def stuff_db(_results):
            self._stuff_database([(Cardholder.class_id, ch_entity, {
                'actual_plugin_unit_type': unit_type,
                'actual_equipment_id': clie_code,
                'protection_profile_pointer': prot_ptr,
            })])

        def do_my_tests(_results):
            config = self.onu_device.configuration

            cardholder = config.cardholder_entities
            self.assertTrue(isinstance(cardholder, dict))
            self.assertEqual(len(cardholder), 1)
            self.assertEqual(cardholder[ch_entity]['entity-id'], ch_entity)
            self.assertEqual(cardholder[ch_entity]['is-single-piece'],
                             ch_entity >= 256)
            self.assertEqual(cardholder[ch_entity]['slot-number'],
                             ch_entity & 0xFF)
            self.assertEqual(cardholder[ch_entity]['actual-plug-in-type'],
                             unit_type)
            self.assertEqual(cardholder[ch_entity]['actual-equipment-id'],
                             clie_code)
            self.assertEqual(cardholder[ch_entity]['protection-profile-ptr'],
                             prot_ptr)

        # No capabilities available until started
        self.assertIsNone(self.onu_device.configuration)

        # Yield context so that MIB Database callLater runs.
        self.onu_device.start()
        d = asleep(0.2)
        d.addCallbacks(stuff_db, self.not_called)
        d.addCallbacks(do_my_tests, self.not_called)
        return d
Example #18
    def perform_task(self):
        """
        Get the 'mib_data_sync' attribute of the ONU
        """
        try:
            running = True

            if self._delay > 0:
                yield asleep(self._delay)

            if self._success:
                self.deferred.callback(self._value)

            self.deferred.errback(failure.Failure(self._value))
            running = False

        except Exception as e:
            running = False
            self.deferred.errback(failure.Failure(e))
Example #19
    def snapshot_alarm(self):
        """
        Snapshot the ALARM on the ONU and create a copy of our local ALARM database

        :return: (pair) (command_sequence_number)
        """
        command_sequence_number = None

        try:
            max_tries = AlarmResyncTask.max_retries - 1

            for retries in xrange(0, max_tries + 1):
                # Send ALARM Upload so ONU snapshots its ALARM
                try:
                    command_sequence_number = yield self.send_alarm_upload()
                    self.stop_if_not_running()

                    if command_sequence_number is None:
                        if retries >= max_tries:
                            break

                except TimeoutError as e:
                    self.log.warn('timeout', e=e)
                    if retries >= max_tries:
                        raise

                    yield asleep(AlarmResyncTask.retry_delay)
                    self.stop_if_not_running()
                    continue

        except Exception as e:
            self.log.exception('alarm-resync', e=e)
            raise

        # Handle initial failures

        if command_sequence_number is None:
            raise AlarmCopyException(
                'Failed to snapshot ALARM copy after {} retries'.format(
                    AlarmResyncTask.max_retries))

        returnValue(command_sequence_number)
Example #20
    def test_defaults(self):
        self.setup_one_of_each()
        self.assertEqual(len(self.omci_agent.device_ids()), 1)

        @raises(AssertionError)
        def do_my_tests(_results):
            config = self.onu_device.configuration
            # Should raise assertion if never been synchronized
            config.version

        # No capabilities available until started
        self.assertIsNone(self.onu_device.configuration)

        # Yield context so that MIB Database callLater runs. This is a waiting
        # Async task from when the OpenOMCIAgent was started. But also start the
        # device so that it's queued async state machines can run as well
        self.onu_device.start()
        d = asleep(0.2)
        d.addCallbacks(do_my_tests, self.not_called)
        return d
Example #21
    def edit_config(self, config, target='running', default_operation='merge',
                    test_option=None, error_option=None, lock_timeout=-1):
        """
        Loads all or part of the specified config to the target configuration datastore with the ability to lock
        the datastore during the edit.  To change multiple items, use your own calls to lock/unlock instead of
        using the lock_timeout value

        :param config is the configuration, which must be rooted in the config element. It can be specified
                      either as a string or an Element.format="xml"
        :param target is the name of the configuration datastore being edited
        :param default_operation if specified must be one of { 'merge', 'replace', or 'none' }
        :param test_option if specified must be one of { 'test_then_set', 'set' }
        :param error_option if specified must be one of { 'stop-on-error', 'continue-on-error', 'rollback-on-error' }
                            The 'rollback-on-error' error_option depends on the :rollback-on-error capability.
        :param lock_timeout if >0, the maximum number of seconds to hold a lock on the datastore while the edit
                            operation is underway

        :return: (deferred) for RpcReply
        """
        if lock_timeout > 0:
            try:
                request = self.lock(target, lock_timeout)
                yield request

            except Exception as e:
                log.exception('edit_config-lock', e=e)
                raise
        try:
            yield asleep(random.uniform(0.1, 2.0))  # Simulate NETCONF request delay

        except Exception as e:
            log.exception('edit_config', e=e)
            raise

        finally:
            if lock_timeout > 0:
                yield self.unlock(target)

        # TODO: Customize if needed...
        xml = _dummy_xml
        returnValue(RPCReply(xml))
Example #22
    def fix_attributes_only_in_mib(self, cid, eid, mib_data):
        successes = 0
        failures = 0
        try:
            # Get current and verify same as during audit it is missing from our DB
            attributes = mib_data.keys()
            current_entry = self._device.query_mib(cid, eid, attributes)

            if current_entry is not None and len(current_entry):
                clashes = {
                    k: v
                    for k, v in current_entry.items()
                    if k in attributes and v != mib_data[k]
                }

                if len(clashes):
                    raise ValueError(
                        'Existing DB entry for {}/{} attributes clash with audit data. Clash: {}'
                        .format(cid, eid, clashes))

            self._sync_sm.mib_set(cid, eid, mib_data)
            successes += len(mib_data)
            self.strobe_watchdog()

            # If we do nothing but DB updates for ALOT of MEs, we are
            # blocking other async twisted tasks, be kind and yield
            self._db_updates += 1
            if self._db_updates >= MibReconcileTask.max_sequential_db_updates:
                self._db_updates = 0
                self._local_deferred = yield asleep(
                    MibReconcileTask.db_update_pause)

        except ValueError as e:
            self.log.debug('attribute-changed', e)
            failures += len(mib_data)

        except Exception as e:
            self.log.exception('attribute-only-fix-mib', e=e, cid=cid, eid=eid)
            failures += len(mib_data)

        returnValue((successes, failures))
Example #23
File: worker.py Project: varcars/voltha
    def _track_my_peers(self, index):
        try:
            prev_index = index
            if self.mycore_store_id:
                # Wait for updates to the store assignment key
                is_timeout, (tmp_index, mappings) = yield \
                                self.coord.consul_get_with_timeout(
                                    key=self.coord.core_store_assignment_key,
                                    recurse=True,
                                    index=index,
                                    timeout=10)

                if is_timeout:
                    return

                # After timeout event the index returned from
                # consul_get_with_timeout is None.  If we are here it's not a
                # timeout, therefore the index is a valid one.
                index = tmp_index

                if mappings and index != prev_index:
                    new_map = loads(mappings[0]['Value'])
                    # Remove my id from my peers list
                    new_map.pop(self.mycore_store_id)
                    if self.peers_map is None or self.peers_map != new_map:
                        self.coord.publish_peers_map_change(new_map)
                        self.peers_map = new_map
                        log.info('peer-mapping-changed', mapping=new_map)
                else:
                    log.debug('no-mapping-change',
                              mappings=mappings,
                              index=index,
                              prev_index=prev_index)

        except Exception, e:
            log.exception('peer-track-error', e=e)
            yield asleep(
                self.coord.worker_config.get(
                    self.coord.
                    worker_config['assignments_track_error_to_avoid_flood'],
                    1))
Example #24
    def test_in_sync_but_empty(self):
        self.setup_one_of_each()
        self.assertEqual(len(self.omci_agent.device_ids()), 1)

        def stuff_db(_results):
            self._stuff_database([])

        def do_my_tests(_results):
            config = self.onu_device.configuration

            # On no Class ID for requested property, None should be
            # returned
            self.assertIsNone(config.version)
            self.assertIsNone(config.traffic_management_option)
            self.assertIsNone(config.onu_survival_time)
            self.assertIsNone(config.equipment_id)
            self.assertIsNone(config.omcc_version)
            self.assertIsNone(config.vendor_product_code)
            self.assertIsNone(config.total_priority_queues)
            self.assertIsNone(config.total_traffic_schedulers)
            self.assertIsNone(config.total_gem_ports)
            self.assertIsNone(config.uptime)
            self.assertIsNone(config.connectivity_capability)
            self.assertIsNone(config.qos_configuration_flexibility)
            self.assertIsNone(config.priority_queue_scale_factor)
            self.assertIsNone(config.cardholder_entities)
            self.assertIsNone(config.circuitpack_entities)
            self.assertIsNone(config.software_images)
            self.assertIsNone(config.ani_g_entities)
            self.assertIsNone(config.uni_g_entities)

        # No capabilities available until started
        self.assertIsNone(self.onu_device.configuration)

        # Yield context so that MIB Database callLater runs.
        self.onu_device.start()
        d = asleep(0.2)
        d.addCallbacks(stuff_db, self.not_called)
        d.addCallbacks(do_my_tests, self.not_called)
        return d
Example #25
    def upload_alarm(self, command_sequence_number):
        ########################################
        # Begin ALARM Upload
        seq_no = None

        for seq_no in xrange(command_sequence_number):
            max_tries = AlarmResyncTask.max_alarm_upload_next_retries

            for retries in xrange(0, max_tries):
                try:
                    response = yield self._device.omci_cc.send_get_all_alarm_next(seq_no)
                    self.strobe_watchdog()

                    omci_msg = response.fields['omci_message'].fields
                    alarm_class_id = omci_msg['alarmed_entity_class']
                    alarm_entity_id = omci_msg['alarmed_entity_id']

                    alarm_bit_map = omci_msg['alarm_bit_map']
                    attributes = {AlarmDbExternal.ALARM_BITMAP_KEY: alarm_bit_map}

                    # Save to the database
                    self._db_active.set(self.device_id, alarm_class_id,
                                        alarm_entity_id, attributes)
                    break

                except TimeoutError:
                    self.log.warn('alarm-resync-timeout', seq_no=seq_no,
                                  command_sequence_number=command_sequence_number)

                    if retries < max_tries - 1:
                        yield asleep(AlarmResyncTask.alarm_upload_next_delay)
                        self.strobe_watchdog()
                    else:
                        raise

                except Exception as e:
                    self.log.exception('resync', e=e, seq_no=seq_no,
                                       command_sequence_number=command_sequence_number)

        returnValue(seq_no + 1)     # seq_no is zero based and alarm table.
Example #26
    def keep_connected(self):
        """Keep reconnecting to the controller"""
        while not self.exiting:
            host, port = self.resolve_endpoint(self.controller_endpoint)
            log.info('connecting', host=host, port=port)
            try:
               with open("/ofagent/pki/voltha.key") as keyFile:
                    with open("/ofagent/pki/voltha.crt") as certFile:
                         clientCert = ssl.PrivateCertificate.loadPEM(
                              keyFile.read() + certFile.read())

               ctx = clientCert.options()
               self.connector = reactor.connectSSL(host, port, self, ctx)

            except Exception as e:
                log.exception('failed-to-connect', reason=e)


            self.d_disconnected = Deferred()
            yield self.d_disconnected
            log.debug('reconnect', after_delay=self.retry_interval)
            yield asleep(self.retry_interval)
Example #27
    def monitor_vcore_grpc_channel(self):
        log.debug('start-monitor-vcore-grpc-channel')

        while self.running:
            try:
                # If a subscription is not yet assigned then establish new GRPC connection
                # ... otherwise keep using existing connection details
                if self.subscription is None:
                    self._assign_grpc_attributes()

                # Send subscription request to register the current ofagent instance
                container_name = self.instance_id
                stub = voltha_pb2.VolthaLocalServiceStub(self.channel)
                subscription = stub.Subscribe(OfAgentSubscriber(ofagent_id=container_name))

                # If the subscriber id matches the current instance
                # ... then the subscription has succeeded
                if subscription is not None and subscription.ofagent_id == container_name:
                    if self.subscription is None:
                        # Keep details on the current GRPC session and subscription
                        log.debug('subscription-with-vcore-successful', subscription=subscription)
                        self.subscription = subscription
                        self.grpc_client = GrpcClient(self, self.channel).start()

                    # Sleep a bit in between each subscribe
                    yield asleep(self.subscription_refresh_interval)

                    # Move on to next subscribe request
                    continue

                # The subscription did not succeed, reset and move on
                else:
                    log.info('subscription-with-vcore-unavailable', subscription=subscription)

            except _Rendezvous, e:
                log.error('subscription-with-vcore-terminated',exception=e, status=e.code())

            except Exception as e:
                log.exception('unexpected-subscription-termination-with-vcore', e=e)
Example #28
    def _track_my_peers(self, index):
        try:
            prev_index = index
            if self.mycore_store_id:
                # Wait for updates to the store assignment key
                is_timeout, (tmp_index, mappings) = yield \
                                self.coord.consul_get_with_timeout(
                                    key=self.coord.core_store_assignment_key,
                                    recurse=True,
                                    index=index,
                                    timeout=10)

                if is_timeout:
                    return

                # After timeout event the index returned from
                # consul_get_with_timeout is None.  If we are here it's not a
                # timeout, therefore the index is a valid one.
                index=tmp_index

                if mappings and index != prev_index:
                    new_map = loads(mappings[0]['Value'])
                    # Remove my id from my peers list
                    new_map.pop(self.mycore_store_id)
                    if self.peers_map is None or self.peers_map != new_map:
                        self.coord.publish_peers_map_change(new_map)
                        self.peers_map = new_map
                        log.info('peer-mapping-changed', mapping=new_map)
                else:
                    log.debug('no-mapping-change', mappings=mappings,
                              index=index, prev_index=prev_index)

        except Exception, e:
            log.exception('peer-track-error', e=e)
            yield asleep(
                self.coord.worker_config.get(
                    self.coord.worker_config[
                        'assignments_track_error_to_avoid_flood'], 1))
Example #29
    def test_delete_scrubs_mib(self):
        self.setup_one_of_each()

        onu_device = self.agent.add_device(DEFAULT_ONU_DEVICE_ID,
                                           self.adapter_agent)
        self.assertIsNotNone(onu_device)
        self.assertEqual(len(self.agent.device_ids()), 1)
        self.assertEqual(self.agent.get_device(DEFAULT_ONU_DEVICE_ID),
                         onu_device)

        def not_called(_reason):
            onu_device.stop()
            assert False, 'Should never be called'

        def check_status(_results):
            # Device started. Query will succeed but nothing should be populated
            # but the most basic items

            results = onu_device.query_mib()
            self.assertTrue(isinstance(results, dict))
            self.assertEqual(results.get(DEVICE_ID_KEY), DEFAULT_ONU_DEVICE_ID)

            # Delete should wipe out any MIB data. Note that a delete of a started
            # or stopped ONU device is allowed.  In this case we are deleting a
            # started ONU Device

            onu_device.delete()
            assert_raises(Exception, onu_device.query_mib)
            # TODO: When capabilities are supported, make sure capabilities get cleared as well

        # Yield context so that MIB Database callLater runs. This is a waiting
        # Async task from when the OpenOMCIAgent was started. But also start the
        # device so that it's queued async state machines can run as well
        onu_device.start()
        d = asleep(0.2)
        d.addCallbacks(check_status, not_called)

        return d
Example #30
    def test_in_sync_with_ani_g_values(self):
        self.setup_one_of_each()
        self.assertEqual(len(self.omci_agent.device_ids()), 1)

        entity_id = 0x0106
        tconts = 4
        dba_report = 4

        def stuff_db(_results):
            self._stuff_database([(AniG.class_id, entity_id, {
                'total_tcont_number': tconts,
                'piggyback_dba_reporting': dba_report
            })])

        def do_my_tests(_results):
            config = self.onu_device.configuration

            anig = config.ani_g_entities
            self.assertTrue(isinstance(anig, dict))
            self.assertEqual(len(anig), 1)

            self.assertEqual(anig[entity_id]['entity-id'], entity_id)
            self.assertEqual(anig[entity_id]['slot-number'],
                             (entity_id >> 8) & 0xff)
            self.assertEqual(anig[entity_id]['port-number'], entity_id & 0xff)
            self.assertEqual(anig[entity_id]['total-tcont-count'], tconts)
            self.assertEqual(anig[entity_id]['piggyback-dba-reporting'],
                             dba_report)

        # No capabilities available until started
        self.assertIsNone(self.onu_device.configuration)

        # Yield context so that MIB Database callLater runs.
        self.onu_device.start()
        d = asleep(0.2)
        d.addCallbacks(stuff_db, self.not_called)
        d.addCallbacks(do_my_tests, self.not_called)
        return d
Example #31
    def edit_config(self,
                    config,
                    target='running',
                    default_operation='merge',
                    test_option=None,
                    error_option=None,
                    ignore_delete_error=False):
        """
        Loads all or part of the specified config to the target configuration datastore with the ability to lock
        the datastore during the edit.

        :param config is the configuration, which must be rooted in the config element. It can be specified
                      either as a string or an Element.format="xml"
        :param target is the name of the configuration datastore being edited
        :param default_operation if specified must be one of { 'merge', 'replace', or 'none' }
        :param test_option if specified must be one of { 'test_then_set', 'set' }
        :param error_option if specified must be one of { 'stop-on-error', 'continue-on-error', 'rollback-on-error' }
                            The 'rollback-on-error' error_option depends on the :rollback-on-error capability.
        :param ignore_delete_error: (bool) For some startup deletes/clean-ups, we do a
                                    delete high up in the config to get whole lists. If
                                    these lists are empty, this helps suppress any error
                                    message from NETCONF on failure to delete an empty list
        :return: (deferred) for RpcReply
        """
        try:
            yield asleep(random.uniform(
                0.01, 0.02))  # Simulate NETCONF request delay

        except Exception as e:
            if ignore_delete_error and 'operation="delete"' in config.lower():
                returnValue('ignoring-delete-error')
            log.exception('edit_config', e=e)
            raise

        # TODO: Customize if needed...
        xml = _dummy_xml
        returnValue(RPCReply(xml))
Example #32
    def _track_members(self, index):

        try:
            (index,
             results) = yield self.coord.kv_get(self.coord.membership_prefix,
                                                index=index,
                                                recurse=True)

            matches = (self.member_id_match(e['Key']) for e in results or [])
            members = [m.group(2) for m in matches if m is not None]

            if members != self.members:
                log.info('membership-changed',
                         old_members_count=len(self.members),
                         new_members_count=len(members))
                self.members = members
                self._restart_reassignment_soak_timer()

        except Exception, e:
            log.exception('members-track-error', e=e)
            yield asleep(
                self.coord.leader_config.get(
                    self.coord.
                    leader_config['members_track_error_to_prevent_flood'], 1))
Example #33
    def reboot(self):
        from common.utils.asleep import asleep
        self.log.info('rebooting', device_id=self.device_id)
        self._cancel_deferred()

        # Drop registration for adapter messages
        self.adapter_agent.unregister_for_inter_adapter_messages()

        # Update the operational status to ACTIVATING and connect status to
        # UNREACHABLE
        device = self.adapter_agent.get_device(self.device_id)
        previous_oper_status = device.oper_status
        previous_conn_status = device.connect_status
        device.oper_status = OperStatus.ACTIVATING
        device.connect_status = ConnectStatus.UNREACHABLE
        device.reason = 'Rebooting'

        self.adapter_agent.update_device(device)

        # Sleep 10 secs, simulating a reboot
        # TODO: send alert and clear alert after the reboot
        yield asleep(10)  # TODO: Need to reboot for real

        # Register for adapter messages
        self.adapter_agent.register_for_inter_adapter_messages()

        # Change the operational status back to its previous state.  With a
        # real OLT the operational state should be the state the device is
        # after a reboot.
        # Get the latest device reference
        device = self.adapter_agent.get_device(self.device_id)
        device.oper_status = previous_oper_status
        device.connect_status = previous_conn_status
        device.reason = ''
        self.adapter_agent.update_device(device)
        self.log.info('rebooted', device_id=self.device_id)
Example #34
    def _track_workload(self, index):

        try:
            (index,
             results) = yield self.coord.kv_get(self.coord.workload_prefix,
                                                index=index,
                                                recurse=True)

            matches = (self.workload_id_match(e['Key']) for e in results)
            workload = [m.group(2) for m in matches if m is not None]

            if workload != self.workload:
                log.info('workload-changed',
                         old_workload_count=len(self.workload),
                         new_workload_count=len(workload))
                self.workload = workload
                self._restart_reassignment_soak_timer()

        except Exception, e:
            log.exception('workload-track-error', e=e)
            yield asleep(
                self.coord.leader_config.get(
                    self.coord.
                    leader_config['workload_track_error_to_prevent_flood'], 1))
Example #35
    def _finish_reboot(self, previous_oper_status, previous_conn_status,
                       reregister):
        from common.utils.asleep import asleep

        if not self.is_mock:
            # TODO: Do a simple poll and call this again if we timeout
            # _ONU_REBOOT_RETRY
            yield asleep(180)  # 3 minutes ...

        # Change the operational status back to its previous state.  With a
        # real OLT the operational state should be the state the device is
        # after a reboot.
        # Get the latest device reference
        device = self.adapter_agent.get_device(self.device_id)

        device.oper_status = previous_oper_status
        device.connect_status = previous_conn_status
        device.reason = ''
        self.adapter_agent.update_device(device)

        if reregister:
            self.adapter_agent.register_for_inter_adapter_messages()

        self.log.info('reboot-complete', device_id=self.device_id)
Example #36
    def test_mib_query_fails_if_dev_not_started(self):
        self.setup_one_of_each()

        onu_device = self.agent.add_device(DEFAULT_ONU_DEVICE_ID,
                                           self.adapter_agent)
        self.assertIsNotNone(onu_device)
        self.assertEqual(len(self.agent.device_ids()), 1)
        self.assertEqual(self.agent.get_device(DEFAULT_ONU_DEVICE_ID),
                         onu_device)

        def not_called(_reason):
            assert False, 'Should never be called'

        def check_status(_results):
            # Device not yet started. Query should fail with KeyError since
            # ONU is not in database yet
            assert_raises(KeyError, onu_device.query_mib)

        # Yield context so that MIB Database callLater runs. This is a waiting
        # Async task from when the OpenOMCIAgent was started.
        d = asleep(0.2)
        d.addCallbacks(check_status, not_called)

        return d
Example #37
    def reboot(self):
        from common.utils.asleep import asleep
        self.log.info('rebooting', device_id=self.device_id)
        self._cancel_deferred()

        # Drop registration for adapter messages
        self.adapter_agent.unregister_for_inter_adapter_messages()

        # Update the operational status to ACTIVATING and connect status to
        # UNREACHABLE
        device = self.adapter_agent.get_device(self.device_id)
        previous_oper_status = device.oper_status
        previous_conn_status = device.connect_status
        device.oper_status = OperStatus.ACTIVATING
        device.connect_status = ConnectStatus.UNREACHABLE
        device.reason = 'Rebooting'

        self.adapter_agent.update_device(device)

        # Sleep 10 secs, simulating a reboot
        # TODO: send alert and clear alert after the reboot
        yield asleep(10)    # TODO: Need to reboot for real

        # Register for adapter messages
        self.adapter_agent.register_for_inter_adapter_messages()

        # Change the operational status back to its previous state.  With a
        # real OLT the operational state should be the state the device is
        # after a reboot.
        # Get the latest device reference
        device = self.adapter_agent.get_device(self.device_id)
        device.oper_status = previous_oper_status
        device.connect_status = previous_conn_status
        device.reason = ''
        self.adapter_agent.update_device(device)
        self.log.info('rebooted', device_id=self.device_id)
Example #38
    def perform_alarm_sync_data(self):
        """
        Sync the time
        """
        self.log.info('perform-alarm-sync-data')

        try:
            device = self.omci_agent.get_device(self.device_id)

            #########################################
            # ONU Data (ME #2)
            # alarm_retrieval_mode=1, time=DEFAULT_OMCI_TIMEOUT
            self.strobe_watchdog()
            results = yield device.omci_cc.send_get_all_alarm(
                alarm_retrieval_mode=1)

            command_sequence_number = results.fields['omci_message'].fields[
                'number_of_commands']

            for seq_no in xrange(command_sequence_number):
                if not device.active or not device.omci_cc.enabled:
                    raise AlarmSyncDataFailure('OMCI and/or ONU is not active')

                for retry in range(0, 3):
                    try:
                        self.log.debug(
                            'alarm-data-next-request',
                            seq_no=seq_no,
                            retry=retry,
                            command_sequence_number=command_sequence_number)
                        self.strobe_watchdog()
                        yield device.omci_cc.send_get_all_alarm_next(seq_no)

                        self.log.debug(
                            'alarm-data-next-success',
                            seq_no=seq_no,
                            command_sequence_number=command_sequence_number)
                        break

                    except TimeoutError as e:
                        from common.utils.asleep import asleep
                        self.log.warn(
                            'alarm-data-timeout',
                            e=e,
                            seq_no=seq_no,
                            command_sequence_number=command_sequence_number)
                        if retry >= 2:
                            raise AlarmSyncDataFailure(
                                'Alarm timeout failure on req {} of {}'.format(
                                    seq_no + 1, command_sequence_number))

                        self.strobe_watchdog()
                        yield asleep(0.3)

            # Successful if here
            self.log.info('alarm-synchronized')
            self.deferred.callback(command_sequence_number)

        except TimeoutError as e:
            self.log.warn('alarm-sync-time-timeout', e=e)
            self.deferred.errback(failure.Failure(e))

        except Exception as e:
            self.log.exception('alarm-sync-time', e=e)
            self.deferred.errback(failure.Failure(e))
Example #39
 def GetEcho(self, request, context):
     print 'got Echo({}) request'.format(request.msg)
     yield asleep(request.delay)
     msg = request.msg + ' <<'
     print '    Echo({}) reply'.format(msg)
     returnValue(Echo(msg=msg))
Example #40
 def get_next_event(self):
     """called on the twisted thread"""
     yield asleep(0.000001)
     event = AsyncEvent(seq=self.event_seq, details='foo')
     self.event_seq += 1
     returnValue(event)
Example #41
    def _simulate_device_activation(self, device):

        # first we pretend that we were able to contact the device and obtain
        # additional information about it
        device.root = True
        device.vendor = 'simulated'
        device.model = 'n/a'
        device.hardware_version = 'n/a'
        device.firmware_version = 'n/a'
        device.serial_number = uuid4().hex
        device.connect_status = ConnectStatus.REACHABLE

        image1 = Image(name="olt_candidate1",
                       version="1.0",
                       hash="",
                       install_datetime=datetime.datetime.utcnow().isoformat(),
                       is_active=True,
                       is_committed=True,
                       is_valid=True)

        image2 = Image(name="olt_candidate2",
                       version="1.0",
                       hash="",
                       install_datetime=datetime.datetime.utcnow().isoformat(),
                       is_active=False,
                       is_committed=False,
                       is_valid=True)

        device.images.image.extend([image1, image2])

        self.adapter_agent.update_device(device)

        # Now set the initial PM configuration for this device
        self.pm_metrics = AdapterPmMetrics(device)
        pm_config = self.pm_metrics.make_proto()
        log.info("initial-pm-config", pm_config=pm_config)
        self.adapter_agent.update_device_pm_config(pm_config, init=True)

        # then shortly after we create some ports for the device
        yield asleep(0.05)
        nni_port = Port(port_no=2,
                        label='NNI facing Ethernet port',
                        type=Port.ETHERNET_NNI,
                        admin_state=AdminState.ENABLED,
                        oper_status=OperStatus.ACTIVE)
        self.adapter_agent.add_port(device.id, nni_port)
        self.adapter_agent.add_port(
            device.id,
            Port(port_no=1,
                 label='PON port',
                 type=Port.PON_OLT,
                 admin_state=AdminState.ENABLED,
                 oper_status=OperStatus.ACTIVE))

        # then shortly after we create the logical device with one port
        # that will correspond to the NNI port
        yield asleep(0.05)
        logical_device_id = uuid4().hex[:12]
        ld = LogicalDevice(
            # not setting id and datapath_id will let the adapter agent pick id
            desc=ofp_desc(mfr_desc='cord project',
                          hw_desc='simulated pon',
                          sw_desc='simulated pon',
                          serial_num=uuid4().hex,
                          dp_desc='n/a'),
            switch_features=ofp_switch_features(
                n_buffers=256,  # TODO fake for now
                n_tables=2,  # TODO ditto
                capabilities=(  # TODO and ditto
                    OFPC_FLOW_STATS
                    | OFPC_TABLE_STATS
                    | OFPC_PORT_STATS
                    | OFPC_GROUP_STATS)),
            root_device_id=device.id)
        ld_initialized = self.adapter_agent.create_logical_device(ld)
        cap = OFPPF_1GB_FD | OFPPF_FIBER
        self.adapter_agent.add_logical_port(
            ld_initialized.id,
            LogicalPort(id='nni',
                        ofp_port=ofp_port(port_no=129,
                                          hw_addr=mac_str_to_tuple(
                                              '00:00:00:00:00:%02x' % 129),
                                          name='nni',
                                          config=0,
                                          state=OFPPS_LIVE,
                                          curr=cap,
                                          advertised=cap,
                                          peer=cap,
                                          curr_speed=OFPPF_1GB_FD,
                                          max_speed=OFPPF_1GB_FD),
                        device_id=device.id,
                        device_port_no=nni_port.port_no,
                        root_port=True))

        # and finally update to active
        device = self.adapter_agent.get_device(device.id)
        device.parent_id = ld_initialized.id
        device.oper_status = OperStatus.ACTIVE
        self.adapter_agent.update_device(device)

        reactor.callLater(0.1, self._simulate_detection_of_onus, device.id)
        self.start_kpi_collection(device.id)

        self.start_alarm_simulation(device.id)
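The activation flow above ends by scheduling background work (ONU detection, KPI collection, alarm simulation). asleep is the usual building block for this kind of periodic work; the loop below is a hypothetical sketch (poll_kpis and the 15-second interval are made up for illustration, and log/asleep are assumed to come from the surrounding module), not the simulated adapter's actual start_kpi_collection:

    from twisted.internet.defer import inlineCallbacks

    @inlineCallbacks
    def poll_kpis(device_id, interval=15):
        # Hypothetical periodic-work loop built on asleep; illustrative only,
        # not the simulated adapter's real KPI collection code.
        while True:
            log.info('kpi-poll', device_id=device_id)  # stand-in for real metric collection
            yield asleep(interval)                      # non-blocking wait between passes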
Example #42
        # Note: the enclosing method signature and the creation of the Voltha
        # gRPC 'stub' are elided from this excerpt; a retry loop wraps the call.
        while True:
            try:
                devices = stub.ListLogicalDevices(Empty()).items
                for device in devices:
                    log.info("logical-device-entry", id=device.id, datapath_id=device.datapath_id)

                returnValue(devices)

            except _Rendezvous, e:
                log.error('vcore-communication-failure', exception=e, status=e.code())
                if e.code() == StatusCode.UNAVAILABLE:
                    os.system("kill -15 {}".format(os.getpid()))

            except Exception as e:
                log.exception('logical-devices-retrieval-failure', exception=e)

            log.info('reconnect', after_delay=self.vcore_retry_interval)
            yield asleep(self.vcore_retry_interval)

    def refresh_agent_connections(self, devices):
        """
        Based on the new device list, update the following state in the class:
        * agent_map
        * datapath_map
        * device_id_map
        :param devices: full device list freshly received from Voltha
        :return: None
        """

        # Use datapath ids for deciding what's new and what's obsolete
        desired_datapath_ids = set(d.datapath_id for d in devices)
        current_datapath_ids = set(datapath_ids[0] for datapath_ids in self.agent_map.iterkeys())
Example #43
    def test_in_sync_with_circuitpack_values(self):
        self.setup_one_of_each()
        self.assertEqual(len(self.omci_agent.device_ids()), 1)

        cp_entity = 0x100
        num_ports = 1
        serial_num = 'ABCD01234'
        cp_version = '1234ABCD'
        vendor_id = 'AB-9876'
        tconts = 2
        pqueues = 64
        sched_count = 8

        def stuff_db(_results):
            self._stuff_database([(CircuitPack.class_id, cp_entity, {
                'number_of_ports': num_ports,
                'serial_number': serial_num,
                'version': cp_version,
                'vendor_id': vendor_id,
                'total_tcont_buffer_number': tconts,
                'total_priority_queue_number': pqueues,
                'total_traffic_scheduler_number': sched_count,
            })])

        def do_my_tests(_results):
            config = self.onu_device.configuration

            circuitpack = config.circuitpack_entities
            self.assertTrue(isinstance(circuitpack, dict))
            self.assertEqual(len(circuitpack), 1)
            self.assertEqual(circuitpack[cp_entity]['entity-id'], cp_entity)
            self.assertEqual(circuitpack[cp_entity]['number-of-ports'],
                             num_ports)
            self.assertEqual(circuitpack[cp_entity]['serial-number'],
                             serial_num)
            self.assertEqual(circuitpack[cp_entity]['version'], cp_version)
            self.assertEqual(circuitpack[cp_entity]['vendor-id'], vendor_id)
            self.assertEqual(circuitpack[cp_entity]['total-tcont-count'],
                             tconts)
            self.assertEqual(
                circuitpack[cp_entity]['total-priority-queue-count'], pqueues)
            self.assertEqual(
                circuitpack[cp_entity]['total-traffic-sched-count'],
                sched_count)

        # No configuration is available until the device has been started
        self.assertIsNone(self.onu_device.configuration)

        # Yield context so that MIB Database callLater runs.
        self.onu_device.start()
        d = asleep(0.2)
        d.addCallbacks(stuff_db, self.not_called)
        d.addCallbacks(do_my_tests, self.not_called)
        return d
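The asleep(0.2) / addCallbacks chain at the end simply sequences the steps: wait for the MIB database's callLater to fire, populate the database, then run the assertions. For illustration only (a sketch, not the original test), the same flow expressed with inlineCallbacks inside the same test method would look like:

        # Sketch: the same sequencing expressed with inlineCallbacks.
        from twisted.internet.defer import inlineCallbacks

        @inlineCallbacks
        def run_test():
            self.onu_device.start()
            yield asleep(0.2)   # let the MIB database callLater run
            stuff_db(None)      # populate the MIB database
            do_my_tests(None)   # run the assertions

        return run_test()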
Example #44
 def send_packets(port, n):
     for i in xrange(n):
         port.send(str(i))
         yield asleep(0.00001 * random.random())  # to interleave
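Because each iteration yields an asleep(...) Deferred, a snippet like this is meant to run as a Twisted coroutine rather than a plain generator. A minimal, hypothetical driver (FakePort is made up for illustration; it only needs a send() method) could look like:

 from twisted.internet import reactor
 from twisted.internet.defer import inlineCallbacks

 class FakePort(object):
     def send(self, frame):
         print 'sending', repr(frame)

 @inlineCallbacks
 def main():
     # wrap the generator so its asleep() yields are driven by the reactor
     yield inlineCallbacks(send_packets)(FakePort(), 5)
     reactor.stop()

 reactor.callWhenRunning(main)
 reactor.run()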
Example #45
 def send_packets(port, n):
     for i in xrange(n):
         # packets have alternating VLAN ids 100 and 101
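         # Ether and Dot1Q are scapy classes (e.g. from scapy.all import Ether, Dot1Q)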
         pkt = Ether() / Dot1Q(vlan=100 + i % 2)
         port.send(str(pkt))
         yield asleep(0.00001 * random.random())  # to interleave
Example #46
 def _backoff(self, msg):
     wait_time = self.RETRY_BACKOFF[min(self.retries,
                                        len(self.RETRY_BACKOFF) - 1)]
     self.retries += 1
     log.error(msg, retry_in=wait_time)
     return asleep(wait_time)
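All of these examples lean on the same asleep helper: a Deferred that fires after a reactor.callLater delay, so waiting never blocks the reactor. A minimal sketch of such a helper (a simplified reconstruction, not necessarily the exact VOLTHA utility):

 from twisted.internet import reactor
 from twisted.internet.defer import Deferred

 def asleep(dt):
     # Return a Deferred that fires with None after 'dt' seconds.
     d = Deferred()
     reactor.callLater(dt, lambda: d.callback(None))
     return d

With that in place, a caller inside an inlineCallbacks retry loop can simply do yield self._backoff('connection-failed') and resume once the back-off delay has elapsed, resetting self.retries after a successful attempt.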
Example #47
    def _simulate_device_activation(self, device):

        # first we verify that we got parent reference and proxy info
        assert device.parent_id
        assert device.proxy_address.device_id
        assert device.proxy_address.channel_id

        # we pretend that we were able to contact the device and obtain
        # additional information about it
        device.vendor = 'simulated onu adapter'
        device.model = 'n/a'
        device.hardware_version = 'n/a'
        device.firmware_version = 'n/a'
        device.serial_number = uuid4().hex
        device.connect_status = ConnectStatus.REACHABLE

        image1 = Image(name="onu_candidate1",
                       version="1.0",
                       hash="1234567892",
                       install_datetime=datetime.datetime.utcnow().isoformat(),
                       is_active=True,
                       is_committed=True,
                       is_valid=True)
        image2 = Image(name="onu_candidate2",
                       version="1.0",
                       hash="1234567893",
                       install_datetime=datetime.datetime.utcnow().isoformat(),
                       is_active=False,
                       is_committed=False,
                       is_valid=True)

        device.images.image.extend([image1, image2])

        self.adapter_agent.update_device(device)

        # then shortly after we create some ports for the device
        yield asleep(0.05)
        uni_port = Port(
            port_no=2,
            label='UNI facing Ethernet port',
            type=Port.ETHERNET_UNI,
            admin_state=AdminState.ENABLED,
            oper_status=OperStatus.ACTIVE
        )
        self.adapter_agent.add_port(device.id, uni_port)
        self.adapter_agent.add_port(device.id, Port(
            port_no=1,
            label='PON port',
            type=Port.PON_ONU,
            admin_state=AdminState.ENABLED,
            oper_status=OperStatus.ACTIVE,
            peers=[
                Port.PeerPort(
                    device_id=device.parent_id,
                    port_no=device.parent_port_no
                )
            ]
        ))

        # TODO adding vports to the logical device shall be done by agent?
        # then we create the logical device port that corresponds to the UNI
        # port of the device
        yield asleep(0.05)

        # obtain logical device id
        parent_device = self.adapter_agent.get_device(device.parent_id)
        logical_device_id = parent_device.parent_id
        assert logical_device_id

        # we are going to use the proxy_address.channel_id as the unique
        # number and name for the virtual port, as it is guaranteed to be
        # unique in the context of the OLT port and therefore also unique in
        # the context of the logical device
        port_no = device.proxy_address.channel_id
        cap = OFPPF_1GB_FD | OFPPF_FIBER
        self.adapter_agent.add_logical_port(logical_device_id, LogicalPort(
            id=str(port_no),
            ofp_port=ofp_port(
                port_no=port_no,
                hw_addr=mac_str_to_tuple('00:00:00:00:00:%02x' % port_no),
                name='uni-{}'.format(port_no),
                config=0,
                state=OFPPS_LIVE,
                curr=cap,
                advertised=cap,
                peer=cap,
                curr_speed=OFPPF_1GB_FD,
                max_speed=OFPPF_1GB_FD
            ),
            device_id=device.id,
            device_port_no=uni_port.port_no
        ))

        # simulate sending a proxied message and receiving a reply
        reply = yield self._simulate_message_exchange(device)

        # and finally update to "ACTIVE"
        device = self.adapter_agent.get_device(device.id)
        device.oper_status = OperStatus.ACTIVE
        self.adapter_agent.update_device(device)