def init_firewall(self, defer_refresh_firewall=False,
                   integration_bridge=None):
     firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver or 'noop'
     LOG.debug("Init firewall settings (driver=%s)", firewall_driver)
     if not _is_valid_driver_combination():
          LOG.warning(_LW("Driver configuration doesn't match "
                          "with enable_security_group"))
     firewall_class = firewall.load_firewall_driver_class(firewall_driver)
     try:
         self.firewall = firewall_class(
             integration_bridge=integration_bridge)
     except TypeError as e:
         LOG.warning(_LW("Firewall driver %(fw_driver)s doesn't accept "
                         "integration_bridge parameter in __init__(): "
                         "%(err)s"),
                     {'fw_driver': firewall_driver,
                      'err': e})
         self.firewall = firewall_class()
     # The following flag will be set to true if port filter must not be
     # applied as soon as a rule or membership notification is received
     self.defer_refresh_firewall = defer_refresh_firewall
     # Stores devices for which firewall should be refreshed when
     # deferred refresh is enabled.
     self.devices_to_refilter = set()
     # Flag raised when a global refresh is needed
     self.global_refresh_firewall = False
     self._use_enhanced_rpc = None
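
The try/except TypeError fallback above passes integration_bridge only to firewall drivers whose constructors accept it. A minimal, self-contained sketch of the same pattern (the driver classes below are hypothetical stand-ins, not Neutron classes):

class BridgeAwareDriver(object):
    def __init__(self, integration_bridge=None):
        self.bridge = integration_bridge


class LegacyDriver(object):
    def __init__(self):
        self.bridge = None


def build_driver(driver_cls, integration_bridge):
    # Prefer passing the optional argument; fall back when the
    # constructor does not know about it.
    try:
        return driver_cls(integration_bridge=integration_bridge)
    except TypeError:
        return driver_cls()


assert build_driver(BridgeAwareDriver, "br-int").bridge == "br-int"
assert build_driver(LegacyDriver, "br-int").bridge is None
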
    def _get_candidates(self, plugin, context, sync_router):
        """Return L3 agents where a router could be scheduled."""
        with context.session.begin(subtransactions=True):
            # A router should be hosted by at most one enabled L3 agent;
            # "active" is only a timing issue, since a non-active L3
            # agent can return to active at any time.
            current_l3_agents = plugin.get_l3_agents_hosting_routers(
                context, [sync_router["id"]], admin_state_up=True)
            is_router_distributed = sync_router.get("distributed", False)
            if current_l3_agents and not is_router_distributed:
                LOG.debug("Router %(router_id)s has already been hosted "
                          "by L3 agent %(agent_id)s",
                          {"router_id": sync_router["id"],
                           "agent_id": current_l3_agents[0]["id"]})
                return []

            active_l3_agents = plugin.get_l3_agents(context, active=True)
            if not active_l3_agents:
                LOG.warning(_LW("No active L3 agents"))
                return []
            potential_candidates = list(
                set(active_l3_agents) - set(current_l3_agents))
            new_l3agents = []
            if potential_candidates:
                new_l3agents = plugin.get_l3_agent_candidates(
                    context, sync_router, potential_candidates)
                if not new_l3agents:
                    LOG.warning(_LW("No L3 agents can host the router %s"),
                                sync_router["id"])
            return new_l3agents
Example #3
 def bind_port(self, context):
     LOG.debug("Attempting to bind port %(port)s on "
               "network %(network)s",
               {'port': context.current['id'],
                'network': context.network.current['id']})
     vnic_type = context.current.get(portbindings.VNIC_TYPE,
                                     portbindings.VNIC_NORMAL)
     if vnic_type not in self.supported_vnic_types:
         LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
                   vnic_type)
         return
     agents = context.host_agents(self.agent_type)
     if not agents:
         LOG.warning(_LW("Port %(pid)s on network %(network)s not bound, "
                         "no agent registered on host %(host)s"),
                     {'pid': context.current['id'],
                      'network': context.network.current['id'],
                      'host': context.host})
     for agent in agents:
         LOG.debug("Checking agent: %s", agent)
         if agent['alive']:
             for segment in context.segments_to_bind:
                 if self.try_to_bind_segment_for_agent(context, segment,
                                                       agent):
                     LOG.debug("Bound using segment: %s", segment)
                     return
         else:
             LOG.warning(_LW("Refusing to bind port %(pid)s to dead agent: "
                             "%(agent)s"),
                         {'pid': context.current['id'], 'agent': agent})
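
bind_port above walks the candidate agents, skips the ones reported as dead and stops at the first segment that binds. A simplified sketch of that control flow, with plain dictionaries standing in for the agent objects (bind_first_live_agent and try_bind are hypothetical helpers, not the ML2 API):

def bind_first_live_agent(agents, segments, try_bind):
    """Return the segment that bound, or None if nothing could bind."""
    for agent in agents:
        if not agent.get('alive'):
            # Dead agents are skipped instead of aborting the whole loop.
            continue
        for segment in segments:
            if try_bind(agent, segment):
                return segment
    return None


agents = [{'host': 'a', 'alive': False}, {'host': 'b', 'alive': True}]
segments = ['seg-1', 'seg-2']
bound = bind_first_live_agent(
    agents, segments, try_bind=lambda agent, segment: segment == 'seg-2')
assert bound == 'seg-2'
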
Example #4
    def _get_candidates(self, plugin, context, sync_router):
        """Return L3 agents where a router could be scheduled."""
        with context.session.begin(subtransactions=True):
            # A router should be hosted by at most one enabled L3 agent;
            # "active" is only a timing issue, since a non-active L3
            # agent can return to active at any time.
            current_l3_agents = plugin.get_l3_agents_hosting_routers(
                context, [sync_router['id']], admin_state_up=True)
            if current_l3_agents:
                LOG.debug('Router %(router_id)s has already been hosted '
                          'by L3 agent %(agent_id)s',
                          {'router_id': sync_router['id'],
                           'agent_id': current_l3_agents[0]['id']})
                return []

            active_l3_agents = plugin.get_l3_agents(context, active=True)
            if not active_l3_agents:
                LOG.warning(_LW('No active L3 agents'))
                return []
            candidates = plugin.get_l3_agent_candidates(context,
                                                        sync_router,
                                                        active_l3_agents)
            if not candidates:
                LOG.warning(_LW('No L3 agents can host the router %s'),
                            sync_router['id'])

            return candidates
 def send_events(self, batched_events):
     LOG.debug("Sending events: %s", batched_events)
     try:
         response = self.nclient.server_external_events.create(
             batched_events)
     except nova_exceptions.NotFound:
         LOG.warning(_LW("Nova returned NotFound for event: %s"),
                     batched_events)
     except Exception:
         LOG.exception(_LE("Failed to notify nova on events: %s"),
                       batched_events)
     else:
         if not isinstance(response, list):
             LOG.error(_LE("Error response returned from nova: %s"),
                       response)
             return
         response_error = False
         for event in response:
             try:
                 code = event['code']
             except KeyError:
                 response_error = True
                 continue
             if code != 200:
                 LOG.warning(_LW("Nova event: %s returned with failed "
                                 "status"), event)
             else:
                 LOG.info(_LI("Nova event response: %s"), event)
         if response_error:
             LOG.error(_LE("Error response returned from nova: %s"),
                       response)
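
send_events relies on try/except/else so that the response is only inspected when the create() call itself succeeded, and per-event failures are logged without aborting the loop. A hedged, standalone sketch of that shape (the list-of-dicts response format is an assumption mirroring the snippet above):

import logging

LOG = logging.getLogger(__name__)


def report_event_results(send, events):
    try:
        response = send(events)
    except Exception:
        LOG.exception("Failed to send events: %s", events)
        return
    else:
        if not isinstance(response, list):
            LOG.error("Unexpected response: %s", response)
            return
        for event in response:
            if event.get('code') != 200:
                LOG.warning("Event returned with failed status: %s", event)


report_event_results(lambda evts: [{'code': 200}, {'code': 422}],
                     [{'name': 'network-changed'}])
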
Example #6
 def _load_all_extensions_from_path(self, path):
     # Sorting the extension list makes the order in which they
     # are loaded predictable across a cluster of load-balanced
     # Neutron Servers
     for f in sorted(os.listdir(path)):
         try:
             LOG.debug('Loading extension file: %s', f)
             mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
             ext_path = os.path.join(path, f)
             if file_ext.lower() == '.py' and not mod_name.startswith('_'):
                 mod = imp.load_source(mod_name, ext_path)
                 ext_name = mod_name[0].upper() + mod_name[1:]
                 new_ext_class = getattr(mod, ext_name, None)
                 if not new_ext_class:
                     LOG.warning(_LW('Did not find expected name '
                                     '"%(ext_name)s" in %(file)s'),
                                 {'ext_name': ext_name,
                                  'file': ext_path})
                     continue
                 new_ext = new_ext_class()
                 self.add_extension(new_ext)
         except Exception as exception:
             LOG.warning(_LW("Extension file %(f)s wasn't loaded due to "
                             "%(exception)s"),
                         {'f': f, 'exception': exception})
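
The loader above uses imp.load_source(), which has long been deprecated and was removed in Python 3.12. On a current interpreter the same per-file loading could be done with importlib along these lines (load_extension_module is a hypothetical helper, not part of Neutron):

import importlib.util
import os


def load_extension_module(path, filename):
    """Load a single extension file and return its module, or None."""
    mod_name, file_ext = os.path.splitext(filename)
    if file_ext.lower() != '.py' or mod_name.startswith('_'):
        return None
    spec = importlib.util.spec_from_file_location(
        mod_name, os.path.join(path, filename))
    module = importlib.util.module_from_spec(spec)
    # Executes the file; exceptions propagate to the caller, which can
    # log and continue with the next file as the agent code above does.
    spec.loader.exec_module(module)
    return module
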
Example #7
    def treat_device(self, device, pci_slot, admin_state_up, spoofcheck=True):
        if self.eswitch_mgr.device_exists(device, pci_slot):
            try:
                self.eswitch_mgr.set_device_spoofcheck(device, pci_slot,
                                                       spoofcheck)
            except Exception:
                LOG.warning(_LW("Failed to set spoofcheck for device %s"),
                            device)
            LOG.info(_LI("Device %(device)s spoofcheck %(spoofcheck)s"),
                     {"device": device, "spoofcheck": spoofcheck})

            try:
                self.eswitch_mgr.set_device_state(device, pci_slot,
                                                  admin_state_up)
            except exc.IpCommandOperationNotSupportedError:
                LOG.warning(_LW("Device %s does not support state change"),
                            device)
            except exc.SriovNicError:
                LOG.warning(_LW("Failed to set device %s state"), device)
                return
            if admin_state_up:
                # update plugin about port status
                self.plugin_rpc.update_device_up(self.context,
                                                 device,
                                                 self.agent_id,
                                                 cfg.CONF.host)
            else:
                self.plugin_rpc.update_device_down(self.context,
                                                   device,
                                                   self.agent_id,
                                                   cfg.CONF.host)
        else:
            LOG.info(_LI("No device with MAC %s defined on agent."), device)
    def remove_networks_from_down_agents(self):
        """Remove networks from down DHCP agents if admin state is up.

        Reschedule them if configured so.
        """

        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents("DHCP", agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = ncontext.get_admin_context()
        try:
            down_bindings = (
                context.session.query(NetworkDhcpAgentBinding)
                .join(agents_db.Agent)
                .filter(agents_db.Agent.heartbeat_timestamp < cutoff, agents_db.Agent.admin_state_up)
            )
            dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
            dead_bindings = [b for b in self._filter_bindings(context, down_bindings)]
            agents = self.get_agents_db(context, {"agent_type": [constants.AGENT_TYPE_DHCP]})
            active_agents = [agent for agent in agents if self.is_eligible_agent(context, True, agent)]
            if not active_agents:
                LOG.warning(_LW("No DHCP agents available, " "skipping rescheduling"))
                return
            for binding in dead_bindings:
                LOG.warning(
                    _LW(
                        "Removing network %(network)s from agent "
                        "%(agent)s because the agent did not report "
                        "to the server in the last %(dead_time)s "
                        "seconds."
                    ),
                    {"network": binding.network_id, "agent": binding.dhcp_agent_id, "dead_time": agent_dead_limit},
                )
                # save binding object to avoid ObjectDeletedError
                # in case binding is concurrently deleted from the DB
                saved_binding = {"net": binding.network_id, "agent": binding.dhcp_agent_id}
                try:
                    # do not notify agent if it considered dead
                    # so when it is restarted it won't see network delete
                    # notifications on its queue
                    self.remove_network_from_dhcp_agent(
                        context, binding.dhcp_agent_id, binding.network_id, notify=False
                    )
                except dhcpagentscheduler.NetworkNotHostedByDhcpAgent:
                    # measures against concurrent operation
                    LOG.debug("Network %(net)s already removed from DHCP " "agent %(agent)s", saved_binding)
                    # still continue and allow concurrent scheduling attempt
                except Exception:
                    LOG.exception(
                        _LE("Unexpected exception occurred while " "removing network %(net)s from agent " "%(agent)s"),
                        saved_binding,
                    )

                if cfg.CONF.network_auto_schedule:
                    self._schedule_network(context, saved_binding["net"], dhcp_notifier)
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(_LE("Exception encountered during network " "rescheduling"))
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""
        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents('L3', agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = n_ctx.get_admin_context()
        down_bindings = (
            context.session.query(RouterL3AgentBinding).
            join(agents_db.Agent).
            filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                   agents_db.Agent.admin_state_up).
            outerjoin(l3_attrs_db.RouterExtraAttributes,
                      l3_attrs_db.RouterExtraAttributes.router_id ==
                      RouterL3AgentBinding.router_id).
            filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                          l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
        try:
            agents_back_online = set()
            for binding in down_bindings:
                if binding.l3_agent_id in agents_back_online:
                    continue
                else:
                    agent = self._get_agent(context, binding.l3_agent_id)
                    if agent.is_active:
                        agents_back_online.add(binding.l3_agent_id)
                        continue

                agent_mode = self._get_agent_mode(binding.l3_agent)
                if agent_mode == constants.L3_AGENT_MODE_DVR:
                    # rescheduling from l3 dvr agent on compute node doesn't
                    # make sense. Router will be removed from that agent once
                    # there are no dvr serviceable ports on that compute node
                    LOG.warning(_LW('L3 DVR agent on node %(host)s is down. '
                                    'Not rescheduling from agent in \'dvr\' '
                                    'mode.'), {'host': binding.l3_agent.host})
                    continue
                LOG.warning(_LW(
                    "Rescheduling router %(router)s from agent %(agent)s "
                    "because the agent did not report to the server in "
                    "the last %(dead_time)s seconds."),
                    {'router': binding.router_id,
                     'agent': binding.l3_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed,
                        oslo_messaging.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"),
                                  binding.router_id)
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(_LE("Exception encountered during router "
                              "rescheduling."))
Example #10
    def plug_new(self, network_id, port_id, device_name, mac_address,
                 bridge=None, namespace=None, prefix=None, mtu=None):
        """Plug in the interface."""
        if not bridge:
            bridge = self.conf.ovs_integration_bridge

        self.check_bridge_exists(bridge)

        ip = ip_lib.IPWrapper()
        tap_name = self._get_tap_name(device_name, prefix)

        if self.conf.ovs_use_veth:
            # Create ns_dev in a namespace if one is configured.
            root_dev, ns_dev = ip.add_veth(tap_name,
                                           device_name,
                                           namespace2=namespace)
            root_dev.disable_ipv6()
        else:
            ns_dev = ip.device(device_name)

        internal = not self.conf.ovs_use_veth
        self._ovs_add_port(bridge, tap_name, port_id, mac_address,
                           internal=internal)
        for i in range(9):
            # workaround for the OVS shy port syndrome. ports sometimes
            # hide for a bit right after they are first created.
            # see bug/1618987
            try:
                ns_dev.link.set_address(mac_address)
                break
            except RuntimeError as e:
                LOG.warning(_LW("Got error trying to set mac, retrying: %s"),
                            str(e))
                time.sleep(1)
        else:
            # didn't break, we give it one last shot without catching
            ns_dev.link.set_address(mac_address)

        # Add an interface created by ovs to the namespace.
        if not self.conf.ovs_use_veth and namespace:
            namespace_obj = ip.ensure_namespace(namespace)
            namespace_obj.add_device_to_namespace(ns_dev)

        # NOTE(ihrachys): the order here is significant: we must set MTU after
        # the device is moved into a namespace, otherwise OVS bridge does not
        # allow to set MTU that is higher than the least of all device MTUs on
        # the bridge
        if mtu:
            ns_dev.link.set_mtu(mtu)
            if self.conf.ovs_use_veth:
                root_dev.link.set_mtu(mtu)
        else:
            LOG.warning(_LW("No MTU configured for port %s"), port_id)

        ns_dev.link.set_up()
        if self.conf.ovs_use_veth:
            root_dev.link.set_up()
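
The MAC-setting loop in plug_new uses Python's for/else: the else branch runs only when the loop never hit break, giving one final, uncaught attempt after the retries are used up. A minimal sketch of that retry shape (set_with_retries and the flaky setter are illustrative stand-ins):

import time


def set_with_retries(set_address, mac, attempts=9, delay=0.1):
    for _ in range(attempts):
        try:
            set_address(mac)
            break
        except RuntimeError:
            time.sleep(delay)
    else:
        # No attempt succeeded; let the final call raise to the caller.
        set_address(mac)


calls = []

def flaky(mac, _state={'n': 0}):
    _state['n'] += 1
    if _state['n'] < 3:
        raise RuntimeError("device not ready")
    calls.append(mac)

set_with_retries(flaky, "fa:16:3e:00:00:01", delay=0)
assert calls == ["fa:16:3e:00:00:01"]
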
Example #11
    def _destroy_namespace_and_port(self):
        try:
            self.device_manager.destroy(self.network, self.interface_name)
        except RuntimeError:
            LOG.warning(_LW("Failed trying to delete interface: %s"), self.interface_name)

        ns_ip = ip_lib.IPWrapper(namespace=self.network.namespace)
        try:
            ns_ip.netns.delete(self.network.namespace)
        except RuntimeError:
            LOG.warning(_LW("Failed trying to delete namespace: %s"), self.network.namespace)
Example #12
 def get_vif_port_set(self):
     edge_ports = set()
     results = self.get_ports_attributes(
         "Interface", columns=["name", "external_ids", "ofport"],
         if_exists=True)
     for result in results:
         if result["ofport"] == UNASSIGNED_OFPORT:
             LOG.warning(_LW("Found not yet ready openvswitch port: %s"),
                         result["name"])
         elif result["ofport"] == INVALID_OFPORT:
             LOG.warning(_LW("Found failed openvswitch port: %s"),
                         result["name"])
         elif "attached-mac" in result["external_ids"]:
             port_id = self.portid_from_external_ids(result["external_ids"])
             if port_id:
                 edge_ports.add(port_id)
     return edge_ports
    def vxlan_mcast_supported(self):
        if not cfg.CONF.VXLAN.vxlan_group:
            LOG.warning(
                _LW("VXLAN multicast group(s) must be provided in "
                    "vxlan_group option to enable VXLAN MCAST mode")
            )
            return False
        if not ip_lib.iproute_arg_supported(["ip", "link", "add", "type", "vxlan"], "proxy"):
            LOG.warning(
                _LW('Option "%(option)s" must be supported by command ' '"%(command)s" to enable %(mode)s mode'),
                {"option": "proxy", "command": "ip link add type vxlan", "mode": "VXLAN MCAST"},
            )

            return False
        return True
 def _get_enabled_agents(self, context, network, agents, method, payload):
     """Get the list of agents who can provide services."""
     if not agents:
         return []
     network_id = network['id']
     enabled_agents = agents
     if not cfg.CONF.enable_services_on_agents_with_admin_state_down:
         enabled_agents = [x for x in agents if x.admin_state_up]
     active_agents = [x for x in agents if x.is_active]
     len_enabled_agents = len(enabled_agents)
     len_active_agents = len(active_agents)
     if len_active_agents < len_enabled_agents:
         LOG.warning(_LW("Only %(active)d of %(total)d DHCP agents "
                         "associated with network '%(net_id)s' "
                         "are marked as active, so notifications "
                         "may be sent to inactive agents."),
                     {'active': len_active_agents,
                      'total': len_enabled_agents,
                      'net_id': network_id})
     if not enabled_agents:
         num_ports = self.plugin.get_ports_count(
             context, {'network_id': [network_id]})
         notification_required = (
             num_ports > 0 and len(network['subnets']) >= 1)
         if notification_required:
             LOG.error(_LE("Will not send event %(method)s for network "
                           "%(net_id)s: no agent available. Payload: "
                           "%(payload)s"),
                       {'method': method,
                        'net_id': network_id,
                        'payload': payload})
     return enabled_agents
Example #15
 def _delete_sg(self, plugin_context, sg_id, clean_session=True):
     try:
         self._delete_resource(self._core_plugin, plugin_context,
                               'security_group', sg_id,
                               clean_session=clean_session)
     except ext_sg.SecurityGroupNotFound:
         LOG.warning(_LW('Security Group %s already deleted'), sg_id)
Example #16
 def _delete_policy_target(self, plugin_context, pt_id, clean_session=True):
     try:
         self._delete_resource(self._group_policy_plugin, plugin_context,
                               'policy_target', pt_id, False,
                               clean_session=clean_session)
     except gp_ext.PolicyTargetNotFound:
          LOG.warning(_LW('Policy Target %s already deleted'), pt_id)
Example #17
 def _delete_l3_policy(self, plugin_context, l3p_id, clean_session=True):
     try:
         self._delete_resource(self._group_policy_plugin,
                               plugin_context, 'l3_policy', l3p_id, False,
                               clean_session=clean_session)
     except gp_ext.L3PolicyNotFound:
         LOG.warning(_LW('L3 Policy %s already deleted'), l3p_id)
Example #18
 def _delete_port(self, plugin_context, port_id, clean_session=True):
     try:
         self._delete_resource(self._core_plugin,
                               plugin_context, 'port', port_id,
                               clean_session=clean_session)
     except n_exc.PortNotFound:
         LOG.warning(_LW('Port %s already deleted'), port_id)
Example #19
 def _delete_network(self, plugin_context, network_id, clean_session=True):
     try:
         self._delete_resource(self._core_plugin, plugin_context,
                               'network', network_id,
                               clean_session=clean_session)
     except n_exc.NetworkNotFound:
         LOG.warning(_LW('Network %s already deleted'), network_id)
Example #20
    def _update_arp_entry(self, ip, mac, subnet_id, operation):
        """Add or delete arp entry into router namespace for the subnet."""
        port = self._get_internal_port(subnet_id)
        # update arp entry only if the subnet is attached to the router
        if not port:
            return False

        try:
            # TODO(mrsmith): optimize the calls below for bulk calls
            interface_name = self.get_internal_device_name(port['id'])
            device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
            if device.exists():
                if operation == 'add':
                    device.neigh.add(ip, mac)
                elif operation == 'delete':
                    device.neigh.delete(ip, mac)
                return True
            else:
                if operation == 'add':
                    LOG.warning(_LW("Device %s does not exist so ARP entry "
                                    "cannot be updated, will cache "
                                    "information to be applied later "
                                    "when the device exists"),
                                device)
                    self._cache_arp_entry(ip, mac, subnet_id, operation)
                return False
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("DVR: Failed updating arp entry"))
Example #21
    def get_or_create_ofport(self, port):
        port_id = port['device']
        try:
            of_port = self.sg_port_map.ports[port_id]
        except KeyError:
            ovs_port = self.int_br.br.get_vif_port_by_id(port_id)
            if not ovs_port:
                raise OVSFWPortNotFound(port_id=port_id)

            try:
                other_config = self.int_br.br.db_get_val(
                    'Port', ovs_port.port_name, 'other_config')
                port_vlan_id = int(other_config['tag'])
            except (KeyError, TypeError):
                LOG.warning(_LW("Cannot get tag for port %(port_id)s from "
                                "its other_config: %(other_config)s"),
                            {'port_id': port_id,
                             'other_config': other_config})
                port_vlan_id = ovs_consts.DEAD_VLAN_TAG
            of_port = OFPort(port, ovs_port, port_vlan_id)
            self.sg_port_map.create_port(of_port, port)
        else:
            self.sg_port_map.update_port(of_port, port)

        return of_port
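
get_or_create_ofport above is a cache lookup with a create-on-miss path, plus a defensive fallback to the dead VLAN tag when the port's other_config carries no usable 'tag'. A stripped-down sketch of that caching pattern with a plain dict as the cache (get_or_create and lookup_vlan are hypothetical simplifications, not the OVS firewall API):

DEAD_VLAN_TAG = 4095


def get_or_create(cache, port_id, lookup_vlan):
    try:
        entry = cache[port_id]
    except KeyError:
        try:
            vlan = int(lookup_vlan(port_id))
        except (KeyError, TypeError, ValueError):
            # No usable tag yet; park the port on the dead VLAN.
            vlan = DEAD_VLAN_TAG
        entry = {'port_id': port_id, 'vlan': vlan}
        cache[port_id] = entry
    return entry


cache = {}
assert get_or_create(cache, 'p1', lambda pid: {'p1': '7'}[pid])['vlan'] == 7
assert get_or_create(cache, 'p2', lambda pid: {}[pid])['vlan'] == DEAD_VLAN_TAG
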
    def vxlan_ucast_supported(self):
        if not cfg.CONF.VXLAN.l2_population:
            return False
        if not ip_lib.iproute_arg_supported(
                ['bridge', 'fdb'], 'append'):
            LOG.warning(_LW('Option "%(option)s" must be supported by command '
                            '"%(command)s" to enable %(mode)s mode'),
                        {'option': 'append',
                         'command': 'bridge fdb',
                         'mode': 'VXLAN UCAST'})
            return False

        test_iface = None
        for seg_id in moves.range(1, p_const.MAX_VXLAN_VNI + 1):
            if (ip_lib.device_exists(self.get_vxlan_device_name(seg_id))
                    or ip_lib.vxlan_in_use(seg_id)):
                continue
            test_iface = self.ensure_vxlan(seg_id)
            break
        else:
            LOG.error(_LE('No valid Segmentation ID to perform UCAST test.'))
            return False

        try:
            utils.execute(
                cmd=['bridge', 'fdb', 'append', constants.FLOODING_ENTRY[0],
                     'dev', test_iface, 'dst', '1.1.1.1'],
                run_as_root=True, log_fail_as_error=False)
            return True
        except RuntimeError:
            return False
        finally:
            self.delete_interface(test_iface)
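
vxlan_ucast_supported above answers a capability question by running a throw-away probe: the RuntimeError from the test command becomes False, and the probe interface is always removed in the finally block. A generic sketch of that probe-and-cleanup shape (create_probe, run_probe and delete_probe are hypothetical callables):

def capability_supported(create_probe, run_probe, delete_probe):
    """Return True only if a throw-away probe succeeds; always clean up."""
    probe = create_probe()
    try:
        run_probe(probe)
        return True
    except RuntimeError:
        return False
    finally:
        delete_probe(probe)


cleaned = []
assert capability_supported(
    create_probe=lambda: "probe0",
    run_probe=lambda probe: None,
    delete_probe=cleaned.append) is True
assert cleaned == ["probe0"]
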
    def vxlan_mcast_supported(self):
        if not cfg.CONF.VXLAN.vxlan_group:
            LOG.warning(_LW('VXLAN multicast group(s) must be provided in '
                            'vxlan_group option to enable VXLAN MCAST mode'))
            return False
        if not ip_lib.iproute_arg_supported(
                ['ip', 'link', 'add', 'type', 'vxlan'],
                'proxy'):
            LOG.warning(_LW('Option "%(option)s" must be supported by command '
                            '"%(command)s" to enable %(mode)s mode'),
                        {'option': 'proxy',
                         'command': 'ip link add type vxlan',
                         'mode': 'VXLAN MCAST'})

            return False
        return True
 def get_bridge_name(network_id):
     if not network_id:
         LOG.warning(_LW("Invalid Network ID, will lead to incorrect "
                         "bridge name"))
     bridge_name = BRIDGE_NAME_PREFIX + \
         network_id[:lconst.RESOURCE_ID_LENGTH]
     return bridge_name
 def get_tap_device_name(interface_id):
     if not interface_id:
         LOG.warning(_LW("Invalid Interface ID, will lead to incorrect "
                         "tap device name"))
     tap_device_name = constants.TAP_DEVICE_PREFIX + \
         interface_id[:lconst.RESOURCE_ID_LENGTH]
     return tap_device_name
    def _notify_agents_router_rescheduled(self, context, router_id,
                                          old_agents, new_agents):
        l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
        if not l3_notifier:
            return

        old_hosts = [agent['host'] for agent in old_agents]
        new_hosts = [agent['host'] for agent in new_agents]
        for host in set(old_hosts) - set(new_hosts):
            l3_notifier.router_removed_from_agent(
                context, router_id, host)

        for agent in new_agents:
            # Need to make sure agents are notified or unschedule otherwise
            for attempt in range(AGENT_NOTIFY_MAX_ATTEMPTS):
                try:
                    l3_notifier.router_added_to_agent(
                        context, [router_id], agent['host'])
                    break
                except oslo_messaging.MessagingException:
                    LOG.warning(_LW('Failed to notify L3 agent on host '
                                    '%(host)s about added router. Attempt '
                                    '%(attempt)d out of %(max_attempts)d'),
                                {'host': agent['host'], 'attempt': attempt + 1,
                                 'max_attempts': AGENT_NOTIFY_MAX_ATTEMPTS})
            else:
                self._unbind_router(context, router_id, agent['id'])
                raise l3agentscheduler.RouterReschedulingFailed(
                    router_id=router_id)
Example #27
    def auto_schedule_routers(self, plugin, context, host, router_ids):
        """Schedule non-hosted routers to L3 Agent running on host.

        If router_ids is given, each router in router_ids is scheduled
        if it is not scheduled yet. Otherwise all unscheduled routers
        are scheduled.
        Do not schedule the routers which are hosted already
        by active l3 agents.

        :returns: True if routers have been successfully assigned to host
        """
        l3_agent = plugin.get_enabled_agent_on_host(
            context, lib_const.AGENT_TYPE_L3, host)
        if not l3_agent:
            return False

        unscheduled_routers = self._get_routers_to_schedule(
            context, plugin, router_ids)
        if not unscheduled_routers:
            if utils.is_extension_supported(
                    plugin, lib_const.L3_HA_MODE_EXT_ALIAS):
                return self._schedule_ha_routers_to_additional_agent(
                    plugin, context, l3_agent)

        target_routers = self._get_routers_can_schedule(
            context, plugin, unscheduled_routers, l3_agent)
        if not target_routers:
            LOG.warning(_LW('No routers compatible with L3 agent '
                            'configuration on host %s'), host)
            return False

        self._bind_routers(context, plugin, target_routers, l3_agent)
        return True
Example #28
 def _port_action(self, plugin, context, port, action):
     """Perform port operations taking care of concurrency issues."""
     try:
         if action == 'create_port':
             return p_utils.create_port(plugin, context, port)
         elif action == 'update_port':
             return plugin.update_port(context, port['id'], port)
         else:
             msg = _('Unrecognized action')
             raise exceptions.Invalid(message=msg)
     except (db_exc.DBError,
             exceptions.NetworkNotFound,
             exceptions.SubnetNotFound,
             exceptions.IpAddressGenerationFailure) as e:
         with excutils.save_and_reraise_exception(reraise=False) as ctxt:
             if isinstance(e, exceptions.IpAddressGenerationFailure):
                 # Check if the subnet still exists and if it does not,
                 # this is the reason why the ip address generation failed.
                 # In any other unlikely event re-raise
                 try:
                     subnet_id = port['port']['fixed_ips'][0]['subnet_id']
                     plugin.get_subnet(context, subnet_id)
                 except exceptions.SubnetNotFound:
                     pass
                 else:
                     ctxt.reraise = True
             net_id = port['port']['network_id']
             LOG.warning(_LW("Action %(action)s for network %(net_id)s "
                             "could not complete successfully: %(reason)s"),
                         {"action": action, "net_id": net_id, 'reason': e})
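
_port_action above decides after the fact whether an exception should propagate: save_and_reraise_exception(reraise=False) swallows it by default, and ctxt.reraise is flipped back to True only when the failure turns out to be genuine. A small sketch of that conditional-reraise idiom (assuming oslo.utils is installed; create and subnet_exists are hypothetical callables):

from oslo_utils import excutils


def create_with_tolerance(create, subnet_exists):
    try:
        return create()
    except ValueError:
        with excutils.save_and_reraise_exception(reraise=False) as ctxt:
            if subnet_exists():
                # The precondition still holds, so the failure is real:
                # let the original exception propagate.
                ctxt.reraise = True
            # Otherwise the error was expected (the subnet is gone) and
            # is dropped; the caller simply gets None back.
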
Example #29
    def _notify_floating_ip_change(self, context, floating_ip):
        router_id = floating_ip['router_id']
        fixed_port_id = floating_ip['port_id']
        # we need to notify agents only in case Floating IP is associated
        if not router_id or not fixed_port_id:
            return

        try:
            # using admin context as router may belong to admin tenant
            router = self._get_router(context.elevated(), router_id)
        except l3.RouterNotFound:
            LOG.warning(_LW("Router %s was not found. "
                            "Skipping agent notification."),
                        router_id)
            return

        if is_distributed_router(router):
            host = self._get_dvr_service_port_hostid(context, fixed_port_id)
            dest_host = self._get_dvr_migrating_service_port_hostid(
                context, fixed_port_id)
            self.l3_rpc_notifier.routers_updated_on_host(
                context, [router_id], host)
            if dest_host and dest_host != host:
                self.l3_rpc_notifier.routers_updated_on_host(
                    context, [router_id], dest_host)
        else:
            self.notify_router_updated(context, router_id)
Example #30
    def enable_isolated_metadata_proxy(self, network):

        # The proxy might work for either a single network
        # or all the networks connected via a router
        # to the one passed as a parameter
        kwargs = {'network_id': network.id}
        # When the metadata network is enabled, the proxy might
        # be started for the router attached to the network
        if self.conf.enable_metadata_network:
            router_ports = [port for port in network.ports
                            if (port.device_owner in
                                constants.ROUTER_INTERFACE_OWNERS)]
            if router_ports:
                # Multiple router ports should not be allowed
                if len(router_ports) > 1:
                    LOG.warning(_LW("%(port_num)d router ports found on the "
                                    "metadata access network. Only the port "
                                    "%(port_id)s, for router %(router_id)s "
                                    "will be considered"),
                                {'port_num': len(router_ports),
                                 'port_id': router_ports[0].id,
                                 'router_id': router_ports[0].device_id})
                kwargs = {'router_id': router_ports[0].device_id}
                self._metadata_routers[network.id] = router_ports[0].device_id

        metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
            self._process_monitor, network.namespace, dhcp.METADATA_PORT,
            self.conf, **kwargs)
 def _delete_external_policy(self, plugin_context, ep_id):
     try:
         self._delete_resource(self._group_policy_plugin, plugin_context,
                               'external_policy', ep_id, False)
     except gp_ext.ExternalPolicyNotFound:
         LOG.warning(_LW('External Policy %s already deleted'), ep_id)
 def _delete_policy_rule_set(self, plugin_context, prs_id):
     try:
         self._delete_resource(self._group_policy_plugin, plugin_context,
                               'policy_rule_set', prs_id, False)
     except gp_ext.PolicyRuleSetNotFound:
         LOG.warning(_LW('Policy Rule Set %s already deleted'), prs_id)
 def _delete_servicechain_instance(self, plugin_context, sci_id):
     try:
         self._delete_resource(self._servicechain_plugin, plugin_context,
                               'servicechain_instance', sci_id, False)
     except sc_ext.ServiceChainInstanceNotFound:
         LOG.warning(_LW("servicechain %s already deleted"), sci_id)
Example #34
    def _get_gbp_details(self, context, request):
        device = request.get('device')
        host = request.get('host')

        core_plugin = manager.NeutronManager.get_plugin()
        port_id = core_plugin._device_to_port_id(context, device)
        port_context = core_plugin.get_bound_port_context(
            context, port_id, host)
        if not port_context:
            LOG.warning(
                _LW("Device %(device)s requested by agent "
                    "%(agent_id)s not found in database"), {
                        'device': port_id,
                        'agent_id': request.get('agent_id')
                    })
            return {'device': device}

        port = port_context.current
        if port[portbindings.HOST_ID] != host:
            LOG.warning(
                _LW("Device %(device)s requested by agent "
                    "%(agent_id)s not found bound for host %(host)s"), {
                        'device': port_id,
                        'host': host,
                        'agent_id': request.get('agent_id')
                    })
            return

        session = context.session
        with session.begin(subtransactions=True):
            # REVISIT(rkukura): Should AIM resources be
            # validated/created here if necessary? Also need to avoid
            # creating any new name mappings without first getting
            # their resource names.

            # TODO(rkukura): For GBP, we need to use the EPG
            # associated with the port's PT's PTG. For now, we just use the
            # network's default EPG.

            # TODO(rkukura): Use common tenant for shared networks.

            # TODO(rkukura): Scope the tenant's AIM name.

            network = port_context.network.current
            epg_tenant_aname = self.name_mapper.tenant(session,
                                                       network['tenant_id'])
            epg_aname = self.name_mapper.network(session, network['id'],
                                                 network['name'])

        promiscuous_mode = port['device_owner'] in PROMISCUOUS_TYPES

        details = {
            'allowed_address_pairs': port['allowed_address_pairs'],
            'app_profile_name': AP_NAME,
            'device': device,
            'enable_dhcp_optimization': self.enable_dhcp_opt,
            'enable_metadata_optimization': self.enable_metadata_opt,
            'endpoint_group_name': epg_aname,
            'host': host,
            'l3_policy_id': network['tenant_id'],  # TODO(rkukura)
            'mac_address': port['mac_address'],
            'port_id': port_id,
            'promiscuous_mode': promiscuous_mode,
            'ptg_tenant': epg_tenant_aname,
            'subnets': self._get_subnet_details(core_plugin, context, port)
        }

        if port['device_owner'].startswith('compute:') and port['device_id']:
            # REVISIT(rkukura): Do we need to map to name using nova client?
            details['vm-name'] = port['device_id']

        # TODO(rkukura): Mark active allowed_address_pairs

        # TODO(rkukura): Add the following details common to the old
        # GBP and ML2 drivers: floating_ip, host_snat_ips, ip_mapping,
        # vrf_name, vrf_subnets, vrf_tenant.

        # TODO(rkukura): Add the following details unique to the old
        # ML2 driver: attestation, interface_mtu.

        # TODO(rkukura): Add the following details unique to the old
        # GBP driver: extra_details, extra_ips, fixed_ips,
        # l2_policy_id.

        return details
    def auto_schedule_networks(self, plugin, context, host):
        """Schedule non-hosted networks to the DHCP agent on the specified
           host.
        """
        agents_per_network = cfg.CONF.dhcp_agents_per_network
        # a list of (agent, net_ids) tuples
        bindings_to_add = []
        with context.session.begin(subtransactions=True):
            fields = ['network_id', 'enable_dhcp', 'segment_id']
            subnets = plugin.get_subnets(context, fields=fields)
            net_ids = {}
            net_segment_ids = collections.defaultdict(set)
            for s in subnets:
                if s['enable_dhcp']:
                    net_segment_ids[s['network_id']].add(s.get('segment_id'))
            for network_id, segment_ids in net_segment_ids.items():
                is_routed_network = any(segment_ids)
                net_ids[network_id] = is_routed_network
            if not net_ids:
                LOG.debug('No non-hosted networks')
                return False
            query = context.session.query(agents_db.Agent)
            query = query.filter(agents_db.Agent.agent_type ==
                                 constants.AGENT_TYPE_DHCP,
                                 agents_db.Agent.host == host,
                                 agents_db.Agent.admin_state_up == sql.true())
            dhcp_agents = query.all()

            query = context.session.query(
                segments_db.SegmentHostMapping.segment_id)
            query = query.filter(segments_db.SegmentHostMapping.host == host)
            segments_on_host = {s.segment_id for s in query}

            for dhcp_agent in dhcp_agents:
                if agents_db.AgentDbMixin.is_agent_down(
                    dhcp_agent.heartbeat_timestamp):
                    LOG.warning(_LW('DHCP agent %s is not active'),
                                dhcp_agent.id)
                    continue
                for net_id, is_routed_network in net_ids.items():
                    agents = plugin.get_dhcp_agents_hosting_networks(
                        context, [net_id])
                    segments_on_network = net_segment_ids[net_id]
                    if is_routed_network:
                        if len(segments_on_network & segments_on_host) == 0:
                            continue
                    else:
                        if len(agents) >= agents_per_network:
                            continue
                    if any(dhcp_agent.id == agent.id for agent in agents):
                        continue
                    net = plugin.get_network(context, net_id)
                    az_hints = (net.get(az_ext.AZ_HINTS) or
                                cfg.CONF.default_availability_zones)
                    if (az_hints and
                        dhcp_agent['availability_zone'] not in az_hints):
                        continue
                    bindings_to_add.append((dhcp_agent, net_id))
        # do it outside transaction so particular scheduling results don't
        # make other to fail
        for agent, net_id in bindings_to_add:
            self.resource_filter.bind(context, [agent], net_id)
        return True
Example #36
 def get_tap_device_name(self, interface_id):
     if not interface_id:
         LOG.warning(_LW("Invalid Interface ID, will lead to incorrect "
                         "tap device name"))
     tap_device_name = constants.TAP_DEVICE_PREFIX + interface_id[0:11]
     return tap_device_name
Example #37
 def get_vxlan_device_name(self, segmentation_id):
     if 0 <= int(segmentation_id) <= p_const.MAX_VXLAN_VNI:
         return VXLAN_INTERFACE_PREFIX + str(segmentation_id)
     else:
         LOG.warning(_LW("Invalid Segmentation ID: %s, will lead to "
                         "incorrect vxlan device name"), segmentation_id)
 def register_resource(self, resource):
     if resource.name in self._resources:
         LOG.warning(_LW('%s is already registered'), resource.name)
     if resource.name in self._tracked_resource_mappings:
         resource.register_events()
     self._resources[resource.name] = resource
Example #39
 def set_mtu(self, device_name, mtu, namespace=None, prefix=None):
     """Set MTU on the interface."""
     if not self._mtu_update_warn_logged:
         LOG.warning(_LW("Interface driver cannot update MTU for ports"))
         self._mtu_update_warn_logged = True
    def _bind_centralized_snat_port_on_dvr_subnet(self, port, lvm, fixed_ips,
                                                  device_owner):
        # since centralized-SNAT (CSNAT) port must have only one fixed
        # IP, directly use fixed_ips[0]
        fixed_ip = fixed_ips[0]
        if port.vif_id in self.local_ports:
            # throw an error if CSNAT port is already on a different
            # dvr routed subnet
            ovsport = self.local_ports[port.vif_id]
            subs = list(ovsport.get_subnets())
            if subs[0] == fixed_ip['subnet_id']:
                return
            LOG.error(
                _LE("Centralized-SNAT port %(port)s on subnet "
                    "%(port_subnet)s already seen on a different "
                    "subnet %(orig_subnet)s"), {
                        "port": port.vif_id,
                        "port_subnet": fixed_ip['subnet_id'],
                        "orig_subnet": subs[0],
                    })
            return
        subnet_uuid = fixed_ip['subnet_id']
        ldm = None
        subnet_info = None
        if subnet_uuid not in self.local_dvr_map:
            # no csnat ports seen on this subnet - create csnat state
            # for this subnet
            subnet_info = self.plugin_rpc.get_subnet_for_dvr(
                self.context, subnet_uuid, fixed_ips=fixed_ips)
            if not subnet_info:
                LOG.warning(
                    _LW("DVR: Unable to retrieve subnet information "
                        "for subnet_id %s. The subnet or the gateway "
                        "may have already been deleted"), subnet_uuid)
                return
            LOG.debug(
                "get_subnet_for_dvr for subnet %(uuid)s "
                "returned with %(info)s", {
                    "uuid": subnet_uuid,
                    "info": subnet_info
                })
            ldm = LocalDVRSubnetMapping(subnet_info, port.ofport)
            self.local_dvr_map[subnet_uuid] = ldm
        else:
            ldm = self.local_dvr_map[subnet_uuid]
            subnet_info = ldm.get_subnet_info()
            # Store csnat OF Port in the existing DVRSubnetMap
            ldm.set_csnat_ofport(port.ofport)

        # create ovsPort footprint for csnat port
        ovsport = OVSPort(port.vif_id, port.ofport, port.vif_mac, device_owner)
        ovsport.add_subnet(subnet_uuid)
        self.local_ports[port.vif_id] = ovsport
        vlan_to_use = lvm.vlan
        if lvm.network_type == p_const.TYPE_VLAN:
            vlan_to_use = lvm.segmentation_id
        self.int_br.install_dvr_to_src_mac(
            network_type=lvm.network_type,
            vlan_tag=vlan_to_use,
            gateway_mac=subnet_info['gateway_mac'],
            dst_mac=ovsport.get_mac(),
            dst_port=ovsport.get_ofport())
Example #41
    def __init__(self, host, conf=None):
        if conf:
            self.conf = conf
        else:
            self.conf = cfg.CONF
        self.router_info = {}

        self._check_config_params()

        self.process_monitor = external_process.ProcessMonitor(
            config=self.conf, resource_type='router')

        self.driver = common_utils.load_interface_driver(self.conf)

        self.context = n_context.get_admin_context_without_session()
        self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
        self.fullsync = True
        self.sync_routers_chunk_size = SYNC_ROUTERS_MAX_CHUNK_SIZE

        # Get the list of service plugins from Neutron Server
        # This is the first place where we contact neutron-server on startup
        # so retry in case its not ready to respond.
        while True:
            try:
                self.neutron_service_plugins = (
                    self.plugin_rpc.get_service_plugin_list(self.context))
            except oslo_messaging.RemoteError as e:
                with excutils.save_and_reraise_exception() as ctx:
                    ctx.reraise = False
                    LOG.warning(
                        _LW('l3-agent cannot check service plugins '
                            'enabled at the neutron server when '
                            'startup due to RPC error. It happens '
                            'when the server does not support this '
                            'RPC API. If the error is '
                            'UnsupportedVersion you can ignore this '
                            'warning. Detail message: %s'), e)
                self.neutron_service_plugins = None
            except oslo_messaging.MessagingTimeout as e:
                with excutils.save_and_reraise_exception() as ctx:
                    ctx.reraise = False
                    LOG.warning(
                        _LW('l3-agent cannot contact neutron server '
                            'to retrieve service plugins enabled. '
                            'Check connectivity to neutron server. '
                            'Retrying... '
                            'Detailed message: %(msg)s.'), {'msg': e})
                    continue
            break

        self.init_extension_manager(self.plugin_rpc)

        self.metadata_driver = None
        if self.conf.enable_metadata_proxy:
            self.metadata_driver = metadata_driver.MetadataDriver(self)

        self.namespaces_manager = namespace_manager.NamespaceManager(
            self.conf, self.driver, self.metadata_driver)

        self._queue = queue.RouterProcessingQueue()
        super(L3NATAgent, self).__init__(host=self.conf.host)

        self.target_ex_net_id = None
        self.use_ipv6 = ipv6_utils.is_enabled()

        self.pd = pd.PrefixDelegation(self.context, self.process_monitor,
                                      self.driver,
                                      self.plugin_rpc.process_prefix_update,
                                      self.create_pd_router_update, self.conf)
Example #42
    def _bind_port_level(self, context, level, segments_to_bind):
        binding = context._binding
        port_id = context.current['id']
        LOG.debug(
            "Attempting to bind port %(port)s on host %(host)s "
            "at level %(level)s using segments %(segments)s", {
                'port': port_id,
                'host': context.host,
                'level': level,
                'segments': segments_to_bind
            })

        if level == MAX_BINDING_LEVELS:
            LOG.error(
                _LE("Exceeded maximum binding levels attempting to bind "
                    "port %(port)s on host %(host)s"), {
                        'port': context.current['id'],
                        'host': context.host
                    })
            return False

        for driver in self.ordered_mech_drivers:
            if not self._check_driver_to_bind(driver, segments_to_bind,
                                              context._binding_levels):
                continue
            try:
                context._prepare_to_bind(segments_to_bind)
                driver.obj.bind_port(context)
                segment = context._new_bound_segment
                if segment:
                    context._push_binding_level(
                        models.PortBindingLevel(port_id=port_id,
                                                host=context.host,
                                                level=level,
                                                driver=driver.name,
                                                segment_id=segment))
                    next_segments = context._next_segments_to_bind
                    if next_segments:
                        # Continue binding another level.
                        if self._bind_port_level(context, level + 1,
                                                 next_segments):
                            return True
                        else:
                            LOG.warning(
                                _LW("Failed to bind port %(port)s on "
                                    "host %(host)s at level %(lvl)s"), {
                                        'port': context.current['id'],
                                        'host': context.host,
                                        'lvl': level + 1
                                    })
                            context._pop_binding_level()
                    else:
                        # Binding complete.
                        LOG.debug(
                            "Bound port: %(port)s, "
                            "host: %(host)s, "
                            "vif_type: %(vif_type)s, "
                            "vif_details: %(vif_details)s, "
                            "binding_levels: %(binding_levels)s", {
                                'port': port_id,
                                'host': context.host,
                                'vif_type': binding.vif_type,
                                'vif_details': binding.vif_details,
                                'binding_levels': context.binding_levels
                            })
                        return True
            except Exception:
                LOG.exception(
                    _LE("Mechanism driver %s failed in "
                        "bind_port"), driver.name)
 def _delete_external_segment(self, plugin_context, es_id):
     try:
         self._delete_resource(self._group_policy_plugin, plugin_context,
                               'external_segment', es_id, False)
     except gp_ext.ExternalSegmentNotFound:
         LOG.warning(_LW('External Segment %s already deleted'), es_id)
Example #44
def initialize_all():
    ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
    ext_mgr.extend_resources("2.0", attributes.RESOURCE_ATTRIBUTE_MAP)
    # At this stage we have a fully populated resource attribute map;
    # build Pecan controllers and routes for all core resources
    for resource, collection in router.RESOURCES.items():
        resource_registry.register_resource_by_name(resource)
        plugin = manager.NeutronManager.get_plugin()
        new_controller = res_ctrl.CollectionsController(collection,
                                                        resource,
                                                        plugin=plugin)
        manager.NeutronManager.set_controller_for_resource(
            collection, new_controller)
        manager.NeutronManager.set_plugin_for_resource(resource, plugin)

    pecanized_resources = ext_mgr.get_pecan_resources()
    for pec_res in pecanized_resources:
        resource = attributes.PLURALS[pec_res.collection]
        manager.NeutronManager.set_controller_for_resource(
            pec_res.collection, pec_res.controller)
        manager.NeutronManager.set_plugin_for_resource(resource,
                                                       pec_res.plugin)

    # Now build Pecan Controllers and routes for all extensions
    resources = ext_mgr.get_resources()
    # Extensions controller is already defined, we don't need it.
    resources.pop(0)
    for ext_res in resources:
        path_prefix = ext_res.path_prefix.strip('/')
        collection = ext_res.collection
        if manager.NeutronManager.get_controller_for_resource(collection):
            # This is a collection that already has a pecan controller, we
            # do not need to do anything else
            continue
        legacy_controller = getattr(ext_res.controller, 'controller',
                                    ext_res.controller)
        new_controller = None
        if isinstance(legacy_controller, base.Controller):
            resource = legacy_controller.resource
            plugin = legacy_controller.plugin
            attr_info = legacy_controller.attr_info
            member_actions = legacy_controller.member_actions
            # Retrieve the parent resource. The parent resource is expected
            # to have the format:
            # {'collection_name': 'name-of-collection',
            #  'member_name': 'name-of-resource'}
            # collection_name does not appear to be used in the legacy code
            # inside the controller logic, so we can assume we do not need it.
            parent = legacy_controller.parent or {}
            parent_resource = parent.get('member_name')
            new_controller = res_ctrl.CollectionsController(
                collection,
                resource,
                resource_info=attr_info,
                parent_resource=parent_resource,
                member_actions=member_actions)
            manager.NeutronManager.set_plugin_for_resource(resource, plugin)
            if path_prefix:
                manager.NeutronManager.add_resource_for_path_prefix(
                    collection, path_prefix)
        elif isinstance(legacy_controller, wsgi.Controller):
            new_controller = utils.ShimCollectionsController(
                collection, None, legacy_controller)
        else:
            LOG.warning(
                _LW("Unknown controller type encountered %s.  It will"
                    "be ignored."), legacy_controller)
        manager.NeutronManager.set_controller_for_resource(
            collection, new_controller)

    # Certain policy checks require that the extensions are loaded
    # and the RESOURCE_ATTRIBUTE_MAP populated before they can be
    # properly initialized. This can only be claimed with certainty
    # once this point in the code has been reached. In the event
    # that the policies have been initialized before this point,
    # calling reset will cause the next policy check to
    # re-initialize with all of the required data in place.
    policy.reset()
    def plug_services(self, context, deployment):
        if deployment:
            provider = deployment[0]['context'].provider
            management = deployment[0]['context'].management
            # Sorted from provider (N) to consumer (0)
            # TODO(ivar): validate that the number of interfaces per service,
            # per service type, is as expected
            self._sort_deployment(deployment)
            for part in deployment:
                info = part['plumbing_info']
                if not info:
                    return
                part_context = part['context']
                # Management PT can be created immediately
                self._create_service_target(context, part_context,
                                            info.get('management', []),
                                            management, 'management')
                # Create proper PTs based on the service type
                jump_ptg = None
                LOG.info(_LI("Plumbing service of type '%s'"),
                         info['plumbing_type'])
                if info['plumbing_type'] == common.PLUMBING_TYPE_ENDPOINT:
                    # No stitching needed, only provider side PT is created.
                    # Override the PT name in order to keep port security up
                    # for this kind of service.
                    info['provider'][0]['name'] = "tscp_endpoint_service_"
                    self._create_service_target(context, part_context,
                                                info.get('provider', []),
                                                provider, 'provider')

                elif info['plumbing_type'] == common.PLUMBING_TYPE_GATEWAY:
                    # L3 stitching needed: provider and consumer side PTs are
                    # created. One proxy_gateway is needed on the consumer side.
                    jump_ptg = self._create_l3_jump_group(
                        context, provider, part['context'].current_position)
                    # On provider side, this service is the default gateway
                    info['provider'][0]['group_default_gateway'] = True
                    self._create_service_target(context, part_context,
                                                info['provider'], provider,
                                                'provider')
                    # On consumer side, this service is the proxy gateway
                    info['consumer'][0]['proxy_gateway'] = True
                    self._create_service_target(context, part_context,
                                                info['consumer'], jump_ptg,
                                                'consumer')
                elif info['plumbing_type'] == common.PLUMBING_TYPE_TRANSPARENT:
                    # L2 stitching needed: provider and consumer side PTs are
                    # created.
                    self._create_service_target(context, part_context,
                                                info.get('provider', []),
                                                provider, 'provider')
                    jump_ptg = self._create_l2_jump_group(
                        context, provider, part['context'].current_position)
                    self._create_service_target(context, part_context,
                                                info['consumer'], jump_ptg,
                                                'consumer')
                else:
                    LOG.warning(_LW("Unsupported plumbing type %s"),
                                info['plumbing_type'])
                # Replace current "provider" with jump ptg if needed
                provider = jump_ptg or provider
Exemple #46
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from debtcollector import moves
from hyperv.neutron import security_groups_driver as sg_driver
from oslo_log import log as logging

from neutron._i18n import _LW

LOG = logging.getLogger(__name__)

# TODO(claudiub): Remove this module at the beginning of the O cycle.

new_driver = 'hyperv.neutron.security_groups_driver.HyperVSecurityGroupsDriver'
LOG.warning(
    _LW("You are using the deprecated firewall driver: %(deprecated)s. "
        "Use the recommended driver %(new)s instead."), {
            'deprecated': '%s.HyperVSecurityGroupsDriver' % __name__,
            'new': new_driver
        })

HyperVSecurityGroupsDriver = moves.moved_class(
    sg_driver.HyperVSecurityGroupsDriver, 'HyperVSecurityGroupsDriver',
    __name__)
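
# A minimal, self-contained sketch of the debtcollector pattern used above.
# The names below (NewDriver, OldDriver) are illustrative only, not part of
# the Hyper-V driver: moves.moved_class() keeps the old name importable while
# emitting a DeprecationWarning when it is instantiated.
import warnings

from debtcollector import moves


class NewDriver(object):
    """The implementation that now lives at the new import path."""


# The returned class subclasses NewDriver, so existing configuration that
# still points at the old name keeps working.
OldDriver = moves.moved_class(NewDriver, 'OldDriver', __name__)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    driver = OldDriver()

assert isinstance(driver, NewDriver)
print(caught[0].category.__name__)  # DeprecationWarning (debtcollector default)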
Exemple #47
 def safe_configure_dhcp_for_network(self, network):
     try:
         self.configure_dhcp_for_network(network)
     except (exceptions.NetworkNotFound, RuntimeError):
         LOG.warning(_LW('Network %s may have been deleted and its resources '
                         'may have already been disposed.'), network.id)
    def remove_networks_from_down_agents(self):
        """Remove networks from down DHCP agents if admin state is up.

        Reschedule them if configured so.
        """

        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents('DHCP', agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = ncontext.get_admin_context()
        try:
            down_bindings = (
                context.session.query(NetworkDhcpAgentBinding).join(
                    agents_db.Agent).filter(
                        agents_db.Agent.heartbeat_timestamp < cutoff,
                        agents_db.Agent.admin_state_up))
            dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
            dead_bindings = list(
                self._filter_bindings(context, down_bindings))
            agents = self.get_agents_db(
                context, {'agent_type': [constants.AGENT_TYPE_DHCP]})
            active_agents = [
                agent for agent in agents
                if self.is_eligible_agent(context, True, agent)
            ]
            if not active_agents:
                LOG.warning(
                    _LW("No DHCP agents available, "
                        "skipping rescheduling"))
                return
            for binding in dead_bindings:
                LOG.warning(
                    _LW("Removing network %(network)s from agent "
                        "%(agent)s because the agent did not report "
                        "to the server in the last %(dead_time)s "
                        "seconds."), {
                            'network': binding.network_id,
                            'agent': binding.dhcp_agent_id,
                            'dead_time': agent_dead_limit
                        })
                # save binding object to avoid ObjectDeletedError
                # in case binding is concurrently deleted from the DB
                saved_binding = {
                    'net': binding.network_id,
                    'agent': binding.dhcp_agent_id
                }
                try:
                    # Do not notify the agent if it is considered dead, so
                    # that when it is restarted it won't see network delete
                    # notifications on its queue.
                    self.remove_network_from_dhcp_agent(context,
                                                        binding.dhcp_agent_id,
                                                        binding.network_id,
                                                        notify=False)
                except dhcpagentscheduler.NetworkNotHostedByDhcpAgent:
                    # measures against concurrent operation
                    LOG.debug(
                        "Network %(net)s already removed from DHCP "
                        "agent %(agent)s", saved_binding)
                    # still continue and allow concurrent scheduling attempt
                except Exception:
                    LOG.exception(
                        _LE("Unexpected exception occurred while "
                            "removing network %(net)s from agent "
                            "%(agent)s"), saved_binding)

                if cfg.CONF.network_auto_schedule:
                    self._schedule_network(context, saved_binding['net'],
                                           dhcp_notifier)
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(
                _LE("Exception encountered during network "
                    "rescheduling"))
Exemple #49
    def get_device_details(self, rpc_context, **kwargs):
        """Agent requests device details."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        host = kwargs.get('host')
        # Cached networks are used to reduce the number of network DB calls;
        # for server-internal use only.
        cached_networks = kwargs.get('cached_networks')
        LOG.debug("Device %(device)s details requested by agent "
                  "%(agent_id)s with host %(host)s",
                  {'device': device, 'agent_id': agent_id, 'host': host})

        plugin = manager.NeutronManager.get_plugin()
        port_id = plugin._device_to_port_id(rpc_context, device)
        port_context = plugin.get_bound_port_context(rpc_context,
                                                     port_id,
                                                     host,
                                                     cached_networks)
        if not port_context:
            LOG.debug("Device %(device)s requested by agent "
                      "%(agent_id)s not found in database",
                      {'device': device, 'agent_id': agent_id})
            return {'device': device}

        segment = port_context.bottom_bound_segment
        port = port_context.current
        # caching information about networks for future use
        if cached_networks is not None:
            if port['network_id'] not in cached_networks:
                cached_networks[port['network_id']] = (
                    port_context.network.current)

        if not segment:
            LOG.warning(_LW("Device %(device)s requested by agent "
                            "%(agent_id)s on network %(network_id)s not "
                            "bound, vif_type: %(vif_type)s"),
                        {'device': device,
                         'agent_id': agent_id,
                         'network_id': port['network_id'],
                         'vif_type': port_context.vif_type})
            return {'device': device}

        if (not host or host == port_context.host):
            new_status = (n_const.PORT_STATUS_BUILD if port['admin_state_up']
                          else n_const.PORT_STATUS_DOWN)
            if port['status'] != new_status:
                plugin.update_port_status(rpc_context,
                                          port_id,
                                          new_status,
                                          host,
                                          port_context.network.current)

        network_qos_policy_id = port_context.network._network.get(
            qos_consts.QOS_POLICY_ID)
        entry = {'device': device,
                 'network_id': port['network_id'],
                 'port_id': port['id'],
                 'mac_address': port['mac_address'],
                 'admin_state_up': port['admin_state_up'],
                 'network_type': segment[api.NETWORK_TYPE],
                 'segmentation_id': segment[api.SEGMENTATION_ID],
                 'physical_network': segment[api.PHYSICAL_NETWORK],
                 'fixed_ips': port['fixed_ips'],
                 'device_owner': port['device_owner'],
                 'allowed_address_pairs': port['allowed_address_pairs'],
                 'port_security_enabled': port.get(psec.PORTSECURITY, True),
                 'qos_policy_id': port.get(qos_consts.QOS_POLICY_ID),
                 'network_qos_policy_id': network_qos_policy_id,
                 'profile': port[portbindings.PROFILE]}
        if 'security_groups' in port:
            entry['security_groups'] = port['security_groups']
        LOG.debug("Returning: %s", entry)
        return entry
 def _security_groups_agent_not_set(self):
     LOG.warning(
         _LW("Security group agent binding currently not set. "
             "This should be set by the end of the init "
             "process."))
 def _delete_fip(self, plugin_context, fip_id):
     try:
         self._delete_resource(self._l3_plugin, plugin_context,
                               'floatingip', fip_id)
     except l3.FloatingIPNotFound:
         LOG.warning(_LW('Floating IP %s already deleted'), fip_id)
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""
        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents('L3', agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = n_ctx.get_admin_context()
        down_bindings = (context.session.query(RouterL3AgentBinding).join(
            agents_db.Agent).filter(
                agents_db.Agent.heartbeat_timestamp < cutoff,
                agents_db.Agent.admin_state_up
            ).outerjoin(
                l3_attrs_db.RouterExtraAttributes,
                l3_attrs_db.RouterExtraAttributes.router_id ==
                RouterL3AgentBinding.router_id).filter(
                    sa.or_(
                        l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                        l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
        try:
            agents_back_online = set()
            for binding in down_bindings:
                if binding.l3_agent_id in agents_back_online:
                    continue
                else:
                    agent = self._get_agent(context, binding.l3_agent_id)
                    if agent.is_active:
                        agents_back_online.add(binding.l3_agent_id)
                        continue

                agent_mode = self._get_agent_mode(binding.l3_agent)
                if agent_mode == constants.L3_AGENT_MODE_DVR:
                    # rescheduling from l3 dvr agent on compute node doesn't
                    # make sense. Router will be removed from that agent once
                    # there are no dvr serviceable ports on that compute node
                    LOG.warning(
                        _LW('L3 DVR agent on node %(host)s is down. '
                            'Not rescheduling from agent in \'dvr\' '
                            'mode.'), {'host': binding.l3_agent.host})
                    continue
                LOG.warning(
                    _LW("Rescheduling router %(router)s from agent %(agent)s "
                        "because the agent did not report to the server in "
                        "the last %(dead_time)s seconds."), {
                            'router': binding.router_id,
                            'agent': binding.l3_agent_id,
                            'dead_time': agent_dead_limit
                        })
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed,
                        oslo_messaging.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"),
                                  binding.router_id)
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(
                _LE("Exception encountered during router "
                    "rescheduling."))
Exemple #53
def is_firewall_enabled():
    if not _is_valid_driver_combination():
        LOG.warning(_LW("Driver configuration doesn't match with "
                        "enable_security_group"))

    return cfg.CONF.SECURITYGROUP.enable_security_group
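
# _is_valid_driver_combination() is referenced above but not shown. Below is
# a hedged sketch of the consistency check it implies; the option names mirror
# the snippet above, while the noop driver aliases are assumptions.
from oslo_config import cfg

_NOOP_DRIVERS = (None, 'noop',
                 'neutron.agent.firewall.NoopFirewallDriver')


def _is_valid_driver_combination():
    sg_conf = cfg.CONF.SECURITYGROUP
    if sg_conf.enable_security_group:
        # Security groups enabled: a real firewall driver must be configured.
        return sg_conf.firewall_driver not in _NOOP_DRIVERS
    # Security groups disabled: only a noop (or unset) driver is consistent.
    return sg_conf.firewall_driver in _NOOP_DRIVERS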
Exemple #54
 def _unregister_db_event(self, listen_obj, listened_event, listen_hander):
     try:
         event.remove(listen_obj, listened_event, listen_hander)
     except sql_exc.InvalidRequestError:
         LOG.warning(_LW("No sqlalchemy event for resource %s found"),
                     listen_obj)
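
# A small, self-contained illustration of the sqlalchemy event API handled
# above: removing a listener that was never registered (or was already
# removed) raises InvalidRequestError, which _unregister_db_event tolerates.
# The engine and listener below are illustrative only.
from sqlalchemy import create_engine, event
from sqlalchemy import exc as sql_exc

engine = create_engine('sqlite://')


def on_connect(dbapi_connection, connection_record):
    pass


event.listen(engine, 'connect', on_connect)
event.remove(engine, 'connect', on_connect)
try:
    event.remove(engine, 'connect', on_connect)
except sql_exc.InvalidRequestError:
    print('listener already removed')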
Exemple #55
 def get_subinterface_name(self, physical_interface, vlan_id):
     if not vlan_id:
         LOG.warning(_LW("Invalid VLAN ID, will lead to incorrect "
                         "subinterface name"))
     subinterface_name = '%s.%s' % (physical_interface, vlan_id)
     return subinterface_name
Exemple #56
 def _get_vf_index(self, pci_slot):
     vf_index = self.pci_slot_map.get(pci_slot)
     if vf_index is None:
         LOG.warning(_LW("Cannot find vf index for pci slot %s"), pci_slot)
         raise exc.InvalidPciSlotError(pci_slot=pci_slot)
     return vf_index
Exemple #57
 def get_bridge_name(self, network_id):
     if not network_id:
         LOG.warning(_LW("Invalid Network ID, will lead to incorrect "
                         "bridge name"))
     bridge_name = BRIDGE_NAME_PREFIX + network_id[0:11]
     return bridge_name
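
# get_subinterface_name() and get_bridge_name() above derive device names
# purely from their arguments, so their behaviour is easy to check in
# isolation. The values below are illustrative, and BRIDGE_NAME_PREFIX = 'brq'
# is an assumption about the Linux bridge agent's constant, not taken from
# the snippet.
BRIDGE_NAME_PREFIX = 'brq'

physical_interface = 'eth1'
vlan_id = 100
network_id = '3f9cedcd-a6f1-4d07-8c3b-6b7e0a3cbbf6'

subinterface_name = '%s.%s' % (physical_interface, vlan_id)
bridge_name = BRIDGE_NAME_PREFIX + network_id[0:11]

print(subinterface_name)  # eth1.100
print(bridge_name)        # brq3f9cedcd-a6
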
 def _delete_servicechain_spec(self, plugin_context, scs_id):
     try:
         self._delete_resource(self._servicechain_plugin, plugin_context,
                               'servicechain_spec', scs_id)
     except sc_ext.ServiceChainSpecNotFound:
         LOG.warning(_LW("servicechain spec %s already deleted"), scs_id)
    def _bind_distributed_router_interface_port(self, port, lvm, fixed_ips,
                                                device_owner):
        # Since a distributed router port must have exactly one fixed IP,
        # use fixed_ips[0] directly.
        fixed_ip = fixed_ips[0]
        subnet_uuid = fixed_ip['subnet_id']
        if subnet_uuid in self.local_dvr_map:
            ldm = self.local_dvr_map[subnet_uuid]
        else:
            # Set up the LocalDVRSubnetMapping for this subnet.
            subnet_info = self.plugin_rpc.get_subnet_for_dvr(
                self.context, subnet_uuid, fixed_ips=fixed_ips)
            if not subnet_info:
                LOG.warning(
                    _LW("DVR: Unable to retrieve subnet information "
                        "for subnet_id %s. The subnet or the gateway "
                        "may have already been deleted"), subnet_uuid)
                return
            LOG.debug(
                "get_subnet_for_dvr for subnet %(uuid)s "
                "returned with %(info)s", {
                    "uuid": subnet_uuid,
                    "info": subnet_info
                })
            ldm = LocalDVRSubnetMapping(subnet_info)
            self.local_dvr_map[subnet_uuid] = ldm

        # DVR takes over
        ldm.set_dvr_owned(True)

        vlan_to_use = lvm.vlan
        if lvm.network_type == p_const.TYPE_VLAN:
            vlan_to_use = lvm.segmentation_id

        subnet_info = ldm.get_subnet_info()
        ip_version = subnet_info['ip_version']
        local_compute_ports = (self.plugin_rpc.get_ports_on_host_by_subnet(
            self.context, self.host, subnet_uuid))
        LOG.debug(
            "DVR: List of ports received from "
            "get_ports_on_host_by_subnet %s", local_compute_ports)
        vif_by_id = self.int_br.get_vifs_by_ids(
            [local_port['id'] for local_port in local_compute_ports])
        for local_port in local_compute_ports:
            vif = vif_by_id.get(local_port['id'])
            if not vif:
                continue
            ldm.add_compute_ofport(vif.vif_id, vif.ofport)
            if vif.vif_id in self.local_ports:
                # The compute port is already tracked on another DVR-routed
                # subnet, so just add this subnet to it.
                comp_ovsport = self.local_ports[vif.vif_id]
                comp_ovsport.add_subnet(subnet_uuid)
            else:
                # This is the first time the compute port is seen on a
                # DVR-routed subnet; track it and add this subnet to it.
                comp_ovsport = OVSPort(vif.vif_id, vif.ofport, vif.vif_mac,
                                       local_port['device_owner'])
                comp_ovsport.add_subnet(subnet_uuid)
                self.local_ports[vif.vif_id] = comp_ovsport
            # create rule for just this vm port
            self.int_br.install_dvr_to_src_mac(
                network_type=lvm.network_type,
                vlan_tag=vlan_to_use,
                gateway_mac=subnet_info['gateway_mac'],
                dst_mac=comp_ovsport.get_mac(),
                dst_port=comp_ovsport.get_ofport())

        if lvm.network_type == p_const.TYPE_VLAN:
            # TODO(vivek) remove the IPv6 related flows once SNAT is not
            # used for IPv6 DVR.
            br = self.phys_brs[lvm.physical_network]
        if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
            br = self.tun_br
        # TODO(vivek) remove the IPv6 related flows once SNAT is not
        # used for IPv6 DVR.
        if ip_version == 4:
            br.install_dvr_process_ipv4(vlan_tag=lvm.vlan,
                                        gateway_ip=subnet_info['gateway_ip'])
        else:
            br.install_dvr_process_ipv6(vlan_tag=lvm.vlan,
                                        gateway_mac=subnet_info['gateway_mac'])
        br.install_dvr_process(vlan_tag=lvm.vlan,
                               vif_mac=port.vif_mac,
                               dvr_mac_address=self.dvr_mac_address)

        # The DVR router interface is itself a port, so track it and add
        # this subnet to it. A subnet appears only once as a router
        # interface on any given router.
        ovsport = OVSPort(port.vif_id, port.ofport, port.vif_mac, device_owner)
        ovsport.add_subnet(subnet_uuid)
        self.local_ports[port.vif_id] = ovsport
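
# The method above leans on two small bookkeeping helpers, OVSPort and
# LocalDVRSubnetMapping, that are not shown here. The sketch below is
# inferred purely from how they are used in that method; attribute and
# method names are therefore assumptions, not the actual Neutron classes.
class OVSPort(object):
    """Tracks an OVS port and the DVR-routed subnets attached to it."""

    def __init__(self, vif_id, ofport, vif_mac, device_owner):
        self.vif_id = vif_id
        self.ofport = ofport
        self.vif_mac = vif_mac
        self.device_owner = device_owner
        self.subnets = set()

    def add_subnet(self, subnet_id):
        self.subnets.add(subnet_id)

    def get_mac(self):
        return self.vif_mac

    def get_ofport(self):
        return self.ofport


class LocalDVRSubnetMapping(object):
    """Caches subnet details plus the compute ofports seen on that subnet."""

    def __init__(self, subnet_info):
        self.subnet_info = subnet_info
        self.compute_ofports = {}
        self.dvr_owned = False

    def set_dvr_owned(self, owned):
        self.dvr_owned = owned

    def get_subnet_info(self):
        return self.subnet_info

    def add_compute_ofport(self, vif_id, ofport):
        self.compute_ofports[vif_id] = ofport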
Exemple #60
    def plug_new(self,
                 network_id,
                 port_id,
                 device_name,
                 mac_address,
                 bridge=None,
                 namespace=None,
                 prefix=None,
                 mtu=None):
        """Plug in the interface."""
        if not bridge:
            bridge = self.conf.ovs_integration_bridge

        self.check_bridge_exists(bridge)

        ip = ip_lib.IPWrapper()
        tap_name = self._get_tap_name(device_name, prefix)

        if self.conf.ovs_use_veth:
            # Create ns_dev in a namespace if one is configured.
            root_dev, ns_dev = ip.add_veth(tap_name,
                                           device_name,
                                           namespace2=namespace)
            root_dev.disable_ipv6()
        else:
            ns_dev = ip.device(device_name)

        internal = not self.conf.ovs_use_veth
        self._ovs_add_port(bridge,
                           tap_name,
                           port_id,
                           mac_address,
                           internal=internal)
        for i in range(9):
            # Workaround for the OVS "shy port" syndrome: ports sometimes
            # hide for a bit right after they are first created.
            # See bug/1618987.
            try:
                ns_dev.link.set_address(mac_address)
                break
            except RuntimeError as e:
                LOG.warning(_LW("Got error trying to set mac, retrying: %s"),
                            str(e))
                time.sleep(1)
        else:
            # The loop did not break: give it one last try without catching
            # the exception.
            ns_dev.link.set_address(mac_address)

        # Add an interface created by ovs to the namespace.
        if not self.conf.ovs_use_veth and namespace:
            namespace_obj = ip.ensure_namespace(namespace)
            namespace_obj.add_device_to_namespace(ns_dev)

        # NOTE(ihrachys): the order here is significant: we must set MTU after
        # the device is moved into a namespace, otherwise OVS bridge does not
        # allow to set MTU that is higher than the least of all device MTUs on
        # the bridge
        if mtu:
            self.set_mtu(device_name, mtu, namespace=namespace, prefix=prefix)
        else:
            LOG.warning(_LW("No MTU configured for port %s"), port_id)

        ns_dev.link.set_up()
        if self.conf.ovs_use_veth:
            root_dev.link.set_up()
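
# A self-contained sketch of the for/else retry idiom used in plug_new()
# above: the else branch runs only if the loop never hit "break", i.e. every
# retry failed, and the final attempt is made without swallowing the error.
# flaky_set_address() is an illustrative stand-in for ns_dev.link.set_address.
import time

attempts = {'count': 0}


def flaky_set_address():
    # Fails the first two times, then succeeds.
    attempts['count'] += 1
    if attempts['count'] < 3:
        raise RuntimeError('device not ready yet')


for _ in range(9):
    try:
        flaky_set_address()
        break
    except RuntimeError as e:
        print('retrying after error: %s' % e)
        time.sleep(0)  # the real code sleeps 1 second between attempts
else:
    # Only reached if the loop exhausted all retries: one last attempt that
    # is allowed to raise.
    flaky_set_address()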