Example #1
    def _rest_get_hostconfigs(self):
        try:
            response = self.odl_rest_client.get()
            response.raise_for_status()
            hostconfigs = response.json()['hostconfigs']['hostconfig']
        except exceptions.ConnectionError:
            LOG.error(_LE("Cannot connect to the Opendaylight Controller"),
                      exc_info=True)
            return None
        except exceptions.HTTPError as e:
            # restconf returns 404 on operation when there is no entry
            if e.response.status_code == codes.not_found:
                LOG.debug("Response code not_found (404)"
                          " treated as an empty list")
                return []
            else:
                LOG.warning(_LW("REST/GET odl hostconfig failed, "),
                            exc_info=True)
                return None
        except KeyError:
            LOG.error(_LE("got invalid hostconfigs"), exc_info=True)
            return None
        except Exception:
            LOG.warning(_LW("REST/GET odl hostconfig failed, "), exc_info=True)
            return None
        else:
            if LOG.isEnabledFor(logging.DEBUG):
                _hconfig_str = jsonutils.dumps(hostconfigs,
                                               sort_keys=True,
                                               indent=4,
                                               separators=(',', ': '))
                LOG.debug("ODLPORTBINDING hostconfigs:\n%s", _hconfig_str)

        return hostconfigs
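The payload parsed above nests entries under hostconfigs/hostconfig. A hypothetical sketch of its shape, assuming the host-id/host-type/config keys read by _update_agents_db later in these examples (all values are made up):

sample_payload = {
    'hostconfigs': {
        'hostconfig': [{
            'host-id': 'compute-1',                   # hypothetical host name
            'host-type': 'ODL L2',                    # hypothetical agent type
            'config': '{"supported_vnic_types": []}'  # JSON string, parsed later
        }]
    }
}
hostconfigs = sample_payload['hostconfigs']['hostconfig']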
Example #2
    def bind_port(self, port_context):
        """bind port using ODL host configuration."""
        # Get all ODL hostconfigs for this host and type
        agentdb = port_context.host_agents(self.L2_TYPE)

        if not agentdb:
            LOG.warning(_LW("No valid hostconfigs in agentsdb for host %s"),
                        port_context.host)
            return

        for raw_hconfig in agentdb:
            # do any $identifier substitution
            hconfig = self._substitute_hconfig_tmpl(port_context, raw_hconfig)

            # Found ODL hostconfig for this host in agentdb
            LOG.debug("ODLPORTBINDING bind port with hostconfig: %s", hconfig)

            if self._hconfig_bind_port(port_context, hconfig):
                break  # Port binding succeeded!
            else:  # Port binding failed!
                LOG.warning(
                    _LW("Failed to bind Port %(pid)s for host "
                        "%(host)s on network %(network)s."), {
                            'pid': port_context.current['id'],
                            'host': port_context.host,
                            'network': port_context.network.current['id']
                        })
        else:  # No hostconfig found for host in agentdb.
            LOG.warning(_LW("No ODL hostconfigs for host %s found in agentdb"),
                        port_context.host)
Example #3
    def _hconfig_bind_port(self, port_context, hconfig):
        """bind port after validating odl host configuration."""
        valid_segment = None

        for segment in port_context.segments_to_bind:
            if self._is_valid_segment(segment, hconfig['configurations']):
                valid_segment = segment
                break
        else:
            LOG.debug("No valid segments found!")
            return False

        confs = hconfig['configurations']['supported_vnic_types']

        # Nova provides the vnic_type in port_context to Neutron.
        # Neutron selects a supported vif_type for binding based on the
        # vnic_type; here the ODL hostconfig supplies the vif_type to bind
        # for that vnic_type.
        vnic_type = port_context.current.get(portbindings.VNIC_TYPE)

        if vnic_type != portbindings.VNIC_NORMAL:
            LOG.error(_LE("Binding failed: unsupported VNIC %s"), vnic_type)
            return False

        for conf in confs:
            if conf["vnic_type"] == vnic_type:
                vif_type = conf.get('vif_type', portbindings.VIF_TYPE_OVS)
                vif_details = conf.get('vif_details', {})
                LOG.debug("Binding vnic:'%s' to vif:'%s'", vnic_type, vif_type)
                break
        else:
            vif_type = portbindings.VIF_TYPE_OVS  # default: OVS
            vif_details = {}
            LOG.warning(
                _LW("No supported vif type found for host %s, "
                    "defaulting to OVS"), port_context.host)

        if not vif_details:  # empty vif_details could be trouble, warn.
            LOG.warning(_LW("hostconfig:vif_details was empty!"))

        LOG.debug(
            "Bind port %(port)s on network %(network)s with valid "
            "segment %(segment)s and VIF type %(vif_type)r "
            "VIF details %(vif_details)r.", {
                'port': port_context.current['id'],
                'network': port_context.network.current['id'],
                'segment': valid_segment,
                'vif_type': vif_type,
                'vif_details': vif_details
            })

        port_context.set_binding(valid_segment[driver_api.ID],
                                 vif_type,
                                 vif_details,
                                 status=nl_const.PORT_STATUS_ACTIVE)
        return True
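A hypothetical sketch of the hostconfig consumed by _hconfig_bind_port above, limited to the keys the method actually reads (values are made up):

hconfig = {
    'configurations': {
        'supported_vnic_types': [{
            'vnic_type': 'normal',   # compared with the port's VNIC type
            'vif_type': 'ovs',       # defaults to VIF_TYPE_OVS when absent
            'vif_details': {},       # empty details only trigger a warning
        }]
    }
}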
Example #4
def _vif_type_from_conf(conf, userspace_datapath_types):

    # take vif_type from datapath_type ------------------------------------
    if conf.datapath_type:
        # take it from  datapath_type
        if conf.datapath_type in USERSPACE_DATAPATH_TYPES:
            if conf.datapath_type not in userspace_datapath_types:
                LOG.warning(
                    _LW("Using user space data path type '%s' even if no "
                        "support was detected."), conf.datapath_type)
            return 'vhostuser'
        else:
            return 'ovs'

    # take vif_type from ovs_dpdk -----------------------------------------
    if conf.ovs_dpdk is True:
        if userspace_datapath_types:
            return 'vhostuser'

        raise ValueError(
            _LE("--ovs_dpdk option was specified but the 'netdev' datapath_type "
                "was not enabled. "
                "To override use option --datapath_type=netdev"))

    elif conf.ovs_dpdk is False:
        return 'ovs'

    # take detected dtype -------------------------------------------------
    if userspace_datapath_types:
        return 'vhostuser'
    else:
        return 'ovs'
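A minimal sketch of how the precedence above plays out, assuming conf is an argparse-style namespace carrying the two options the function reads (values are made up):

from argparse import Namespace

# Neither --datapath_type nor --ovs_dpdk given: the detected userspace
# datapath types decide the result.
assert _vif_type_from_conf(
    Namespace(datapath_type=None, ovs_dpdk=None), ('netdev',)) == 'vhostuser'

# --ovs_dpdk explicitly disabled always yields plain OVS.
assert _vif_type_from_conf(
    Namespace(datapath_type=None, ovs_dpdk=False), ()) == 'ovs'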
Example #5
    def __init__(self, service_plugin, validator=None):
        super(OpenDaylightL2gwDriver, self).__init__(service_plugin, validator)
        self.service_plugin = service_plugin
        self.client = odl_client.OpenDaylightRestClient.create_client()
        LOG.warning(
            _LW("ODL: OpenDaylight L2gateway driver has been deprecated "
                "and will be removed. Switch to driver_v2."))
Example #6
    def _get_and_update_hostconfigs(self, session=None):
        LOG.info(_LI("REST/GET hostconfigs from ODL"))

        hostconfigs = self._rest_get_hostconfigs()

        if not hostconfigs:
            LOG.warning(
                _LW("ODL hostconfigs REST/GET failed, "
                    "will retry on next poll"))
            return  # retry on next poll

        self._update_agents_db(hostconfigs=hostconfigs)
Example #7
def journal_recovery(session):
    for row in db.get_all_db_rows_by_state(session, odl_const.FAILED):
        try:
            LOG.debug("Attempting recovery of journal entry %s.", row)
            odl_resource = _CLIENT.get_client().get_resource(
                row.object_type, row.object_uuid)
            if odl_resource is not None:
                _handle_existing_resource(session, row)
            else:
                _handle_non_existing_resource(session, row)
        except UnsupportedResourceType:
            LOG.warning(_LW('Unsupported resource %s'), row.object_type)
        except Exception:
            LOG.exception(_LE("Failure while recovering journal entry %s."),
                          row)
Example #8
    def __init__(self, vif_details=None, client=None):
        # Details for binding port
        self._vif_details = vif_details or {portbindings.CAP_PORT_FILTER: True}

        # Rest client used for getting network topology from ODL
        self._client = client or NetworkTopologyClient.create_client()

        # Table of NetworkTopologyElement
        self._elements_by_ip = cache.Cache(
            self._fetch_and_parse_network_topology)

        # Parsers used for processing network topology
        self._parsers = list(self._create_parsers())
        LOG.warning(
            _LW("networking-topology port binding controller is deprecated "
                "and will be removed. switch to pseudo-agentdb-binding."))
Example #9
    def _update_agents_db(self, hostconfigs):
        LOG.debug("ODLPORTBINDING Updating agents DB with ODL hostconfigs")

        agents_db = self._get_neutron_db_plugin()

        if not agents_db:  # if ML2 is still initializing
            LOG.warning(
                _LW("ML2 still initializing, Will retry agentdb"
                    " update on next poll"))
            return  # Retry on next poll

        for host_config in hostconfigs:
            try:
                self.agentdb_row['host'] = host_config['host-id']
                self.agentdb_row['agent_type'] = host_config['host-type']
                self.agentdb_row['configurations'] = jsonutils.loads(
                    host_config['config'])

                agents_db.create_or_update_agent(context.get_admin_context(),
                                                 self.agentdb_row)
            except Exception:
                LOG.exception(_LE("Unable to update agentdb."))
                continue  # try next hostconfig
Example #10
    def bind_port(self, port_context):
        """Set binding for a valid segment."""
        host_name = port_context.host
        elements = list()
        try:
            # Append to an empty list so as many elements as possible are
            # collected in case the fetch raises an exception
            elements.extend(self._fetch_elements_by_host(host_name))
        except Exception:
            LOG.exception(
                _LE('Error fetching elements for host %(host_name)r.'),
                {'host_name': host_name},
                exc_info=1)

        if not elements:
            # If no network topology element could be found for the given
            # host, fall back to the legacy OVS one to keep the old
            # behaviour
            LOG.warning(
                _LW('Using legacy OVS network topology element for port '
                    'binding for host: %(host_name)r.'),
                {'host_name': host_name})

            # Imported here to avoid cyclic module dependencies
            from networking_odl.ml2 import ovsdb_topology
            elements = [ovsdb_topology.OvsdbNetworkTopologyElement()]

        # TODO(Federico Ressi): when there is more than one candidate virtual
        # switch instance for the same host, one has to be chosen for binding
        # the port. As there is no known way to perform this selection, it
        # picks a VIF type that is valid for all switches that have been
        # found. This has to be improved.
        for vif_type in self.valid_vif_types:
            vif_type_is_valid_for_all = True
            for element in elements:
                if vif_type not in element.valid_vif_types:
                    # it is invalid for at least one element: discard it
                    vif_type_is_valid_for_all = False
                    break

            if vif_type_is_valid_for_all:
                # This is the best VIF type valid for all elements
                LOG.debug(
                    "Found VIF type %(vif_type)r valid for all network "
                    "topology elements for host %(host_name)r.", {
                        'vif_type': vif_type,
                        'host_name': host_name
                    })

                for element in elements:
                    # It assumes that any element could be good for the given
                    # host. In most cases I expect exactly one element for
                    # every compute host
                    try:
                        return element.bind_port(port_context, vif_type,
                                                 self._vif_details)

                    except Exception:
                        LOG.exception(
                            _LE('Network topology element has failed binding '
                                'port:\n%(element)s'),
                            {'element': element.to_json()})

        LOG.error(
            _LE('Unable to bind port element for given host and valid VIF '
                'types:\n'
                '\thostname: %(host_name)s\n'
                '\tvalid VIF types: %(valid_vif_types)s'), {
                    'host_name': host_name,
                    'valid_vif_types': ', '.join(self.valid_vif_types)
                })
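The loop above effectively picks the first VIF type supported by every discovered element; a compact, standalone restatement of that selection (hypothetical helper, not part of the driver):

def _first_common_vif_type(valid_vif_types, elements):
    # Return the first candidate VIF type that every topology element
    # supports, or None when there is no common VIF type.
    for vif_type in valid_vif_types:
        if all(vif_type in element.valid_vif_types for element in elements):
            return vif_type
    return None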
Example #11
    def fetch_all(self, keys, timeout):
        # the current clock value, used as "now"
        current_clock = time.clock()
        # the moment in the future at which new entries will expire
        new_entries_timeout = current_clock + timeout
        # entries to be fetched because missing or expired
        new_entries = collections.OrderedDict()
        # all entries missing or expired
        missing = collections.OrderedDict()
        # captured error for the case a problem has to be reported
        cause_exc_info = None

        for key in keys:
            entry = self._entries.get(key)
            if entry is None or entry.is_expired(current_clock) or entry.error:
                # this entry has to be fetched
                new_entries[key] = missing[key] =\
                    self.create_new_entry(new_entries_timeout)
            elif entry.values:
                # Yield existing entry
                for value in entry.values:
                    yield key, value
            else:
                # This entry is not expired and there was no error when it
                # was fetched. Therefore we accept that there are no values
                # for the given key until it expires. This is going to
                # produce a KeyError if it is still missing at the end of
                # this function.
                missing[key] = entry

        if missing:
            if new_entries:
                # Fetch some entries and update the cache
                try:
                    new_entry_keys = tuple(new_entries)
                    for key, value in self._fetch_all(new_entry_keys):
                        entry = new_entries.get(key)
                        if entry:
                            # Add fresh new value
                            entry.add_value(value)
                        else:
                            # This key was not requested, but we keep it
                            # anyway. "Noli equi dentes inspicere donati."
                            new_entries[key] = entry = self.create_new_entry(
                                new_entries_timeout, value)

                # pylint: disable=broad-except
                except Exception:
                    # Something has gone wrong: update and yield what we got
                    # so far before raising any error
                    cause_exc_info = sys.exc_info()
                    LOG.warning(_LW('Error fetching values for keys: %r'),
                                ', '.join(repr(k) for k in new_entry_keys),
                                exc_info=cause_exc_info)

                # update the cache with new fresh entries
                self._entries.update(new_entries)

            missing_keys = []
            for key, entry in six.iteritems(missing):
                if entry.values:
                    # yield entries that were missing before
                    for value in entry.values:
                        # Yield just fetched entry
                        yield key, value
                else:
                    if cause_exc_info:
                        # mark this entry as failed
                        entry.error = cause_exc_info
                    # after all this entry is still without any value
                    missing_keys.append(key)

            if missing_keys:
                # After all this, some entries are still missing, probably
                # because the keys were invalid. It's time to raise an error.
                missing_keys = tuple(missing_keys)
                if not cause_exc_info:
                    # Search for the error cause in missing entries
                    for key in missing_keys:
                        error = self._entries[key].error
                        if error:
                            # A cached entry whose fetch produced an error
                            # will report that same error again if a later
                            # fetch fails to return it without raising.
                            # Is this what we want?
                            break

                    else:
                        # If the cause of the problem is not known then
                        # probably the keys were wrong
                        message = 'Invalid keys: {!r}'.format(
                            ', '.join(missing_keys))
                        error = KeyError(message)

                    try:
                        raise error
                    except KeyError:
                        cause_exc_info = sys.exc_info()

                raise CacheFetchError(missing_keys=missing_keys,
                                      cause_exc_info=cause_exc_info)
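A hypothetical usage sketch of the cache above, assuming the fetch_all method shown here belongs to the cache.Cache class constructed with a fetch callback in the earlier __init__ example (names and values are made up):

def _fetch_hosts(keys):
    # The fetch callback yields (key, value) pairs for the keys it can
    # resolve; keys left unresolved make fetch_all raise CacheFetchError.
    for key in keys:
        yield key, {'host-id': key}

hosts_cache = cache.Cache(_fetch_hosts)

# fetch_all is a generator: it yields cached values while they are still
# valid and freshly fetched ones otherwise.
for key, value in hosts_cache.fetch_all(keys=['compute-1'], timeout=60):
    print(key, value)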
Example #12
    def bind_port(self, port_context):
        """Set binding for a valid segment."""
        host_name = port_context.host
        elements = list()
        try:
            # Append to an empty list so as many elements as possible are
            # collected in case the fetch raises an exception
            elements.extend(self._fetch_elements_by_host(host_name))
        except Exception:
            LOG.exception(
                _LE('Error fetching elements for host %(host_name)r.'),
                {'host_name': host_name}, exc_info=1)

        if not elements:
            # If no network topology element could be found for the given
            # host, fall back to the legacy OVS one to keep the old
            # behaviour
            LOG.warning(
                _LW('Using legacy OVS network topology element for port '
                    'binding for host: %(host_name)r.'),
                {'host_name': host_name})

            # Imported here to avoid cyclic module dependencies
            from networking_odl.ml2 import ovsdb_topology
            elements = [ovsdb_topology.OvsdbNetworkTopologyElement()]

        # TODO(Federico Ressi): when there is more than one candidate virtual
        # switch instance for the same host, one has to be chosen for binding
        # the port. As there is no known way to perform this selection, it
        # picks a VIF type that is valid for all switches that have been
        # found. This has to be improved.
        for vif_type in self.valid_vif_types:
            vif_type_is_valid_for_all = True
            for element in elements:
                if vif_type not in element.valid_vif_types:
                    # it is invalid for at least one element: discard it
                    vif_type_is_valid_for_all = False
                    break

            if vif_type_is_valid_for_all:
                # This is the best VIF type valid for all elements
                LOG.debug(
                    "Found VIF type %(vif_type)r valid for all network "
                    "topology elements for host %(host_name)r.",
                    {'vif_type': vif_type, 'host_name': host_name})

                for element in elements:
                    # It assumes that any element could be good for the given
                    # host. In most cases I expect exactly one element for
                    # every compute host
                    try:
                        return element.bind_port(
                            port_context, vif_type, self._vif_details)

                    except Exception:
                        LOG.exception(
                            _LE('Network topology element has failed binding '
                                'port:\n%(element)s'),
                            {'element': element.to_json()})

        LOG.error(
            _LE('Unable to bind port element for given host and valid VIF '
                'types:\n'
                '\thostname: %(host_name)s\n'
                '\tvalid VIF types: %(valid_vif_types)s'),
            {'host_name': host_name,
             'valid_vif_types': ', '.join(self.valid_vif_types)})