Example #1
    def _fetch_and_parse_network_topology(self, addresses):
        # The cache calls this method to fetch new elements when at least one
        # of the addresses is not in the cache or its entry has expired.

        # pylint: disable=unused-argument
        LOG.info(_LI('Fetch network topology from ODL.'))
        response = self._client.get()
        response.raise_for_status()

        network_topology = response.json()
        if LOG.isEnabledFor(logging.DEBUG):
            topology_str = jsonutils.dumps(network_topology,
                                           sort_keys=True,
                                           indent=4,
                                           separators=(',', ': '))
            LOG.debug("Got network topology:\n%s", topology_str)

        at_least_one_element_for_asked_addresses = False
        for parser in self._parsers:
            try:
                for element in parser.parse_network_topology(network_topology):
                    if not isinstance(element, NetworkTopologyElement):
                        raise TypeError(
                            "Yielded element doesn't implement interface "
                            "'NetworkTopologyElement': {!r}".format(element))
                    # the same element can be known by more than one host
                    # address
                    for host_address in element.host_addresses:
                        if host_address in addresses:
                            at_least_one_element_for_asked_addresses = True
                        yield host_address, element
            except Exception:
                LOG.exception(
                    _LE("Parser %(parser)r failed to parse network topology."),
                    {'parser': parser})

        if not at_least_one_element_for_asked_addresses:
            # This marks the entries for the given addresses as failed, so
            # that this method is called again as soon as they are requested
            # instead of waiting for the cache entries to expire.
            raise ValueError(
                'No such topology element for given host addresses: {}'.format(
                    ', '.join(addresses)))
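
A minimal consumer sketch for the generator above. The helper name
refresh_topology_cache is an assumption for illustration only; fetch stands in
for a bound _fetch_and_parse_network_topology, and the ValueError handling
mirrors the "failed entry" behaviour described in the comments.

import collections
import logging

LOG = logging.getLogger(__name__)


def refresh_topology_cache(fetch, addresses):
    """Collect the (host_address, element) pairs yielded by the generator."""
    cache = collections.defaultdict(list)
    try:
        for host_address, element in fetch(addresses):
            cache[host_address].append(element)
    except ValueError:
        # Nothing matched the asked addresses: leave them uncached so the
        # next lookup triggers a new fetch instead of waiting for expiration.
        LOG.warning("No topology element for addresses: %s",
                    ", ".join(addresses))
    return cache
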
Example #3
    def sync_pending_row(self, exit_after_run=False):
        # Block until all pending rows are processed
        session = neutron_db_api.get_session()
        while not self.event.is_set():
            self.event.wait()
            # Clear the event and go back to waiting after
            # the sync block exits
            self.event.clear()
            while True:
                LOG.debug("Thread walking database")
                row = db.get_oldest_pending_db_row_with_lock(session)
                if not row:
                    LOG.debug("No rows to sync")
                    break

                # Validate the operation
                validate_func = db.VALIDATION_MAP[row.object_type]
                valid = validate_func(session, row.object_uuid,
                                      row.operation, row.data)
                if not valid:
                    LOG.info(_LI("%(operation)s %(type)s %(uuid)s is not a "
                                 "valid operation yet, skipping for now"),
                             {'operation': row.operation,
                              'type': row.object_type,
                              'uuid': row.object_uuid})

                    # Set row back to pending.
                    db.update_db_row_pending(session, row)
                    if exit_after_run:
                        break
                    continue

                LOG.info(_LI("Syncing %(operation)s %(type)s %(uuid)s"),
                         {'operation': row.operation, 'type': row.object_type,
                          'uuid': row.object_uuid})

                # Translate the row into an HTTP request and send it to ODL
                method, urlpath, to_send = self._json_data(row)

                try:
                    self.client.sendjson(method, urlpath, to_send)
                    db.update_processing_db_row_passed(session, row)
                except exceptions.ConnectionError:
                    # Don't increment the retry count, just log an error
                    LOG.error(_LE("Cannot connect to the Opendaylight "
                                  "Controller"))
                    # Set row back to pending
                    db.update_db_row_pending(session, row)
                    # Break out of the loop and retry on the next
                    # timer interval
                    break
                except Exception as e:
                    LOG.error(_LE("Error syncing %(type)s %(operation)s,"
                                  " id %(uuid)s Error: %(error)s"),
                              {'type': row.object_type,
                               'uuid': row.object_uuid,
                               'operation': row.operation,
                               'error': e})
                    db.update_pending_db_row_retry(session, row,
                                                   self._row_retry_count)
            LOG.debug("Clearing sync thread event")
            if exit_after_run:
                # A permanently waiting thread breaks unit tests; this
                # argument lets them exit here instead.
                break
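
A small self-contained sketch of the wait/clear/drain wake-up pattern the loop
above relies on. The class name, the notify() method and the drain callable
are illustrative assumptions, not the driver's actual API.

import threading


class PendingRowWakeupSketch(object):
    def __init__(self, drain):
        # 'drain' is any callable that processes all queued rows and returns.
        self.event = threading.Event()
        self._drain = drain

    def notify(self):
        # Producers call this after inserting a new pending row.
        self.event.set()

    def run(self, exit_after_run=False):
        while not self.event.is_set():
            self.event.wait()    # block until new work is signalled
            self.event.clear()   # re-arm before draining the queue
            self._drain()        # process every currently pending row
            if exit_after_run:
                # Unit tests pass exit_after_run=True to avoid a thread
                # that waits forever.
                break

A test could instantiate the sketch with a stub drain callable, call notify(),
and then run(exit_after_run=True) to exercise a single drain pass without
leaving a background thread behind.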