Example No. 1
def main():
    common_config.init(sys.argv[1:])

    common_config.setup_logging()
    try:
        interface_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing physical_interface_mappings failed: %s. "
                      "Agent terminated!"), e)
        sys.exit(1)
    LOG.info(_LI("Interface mappings: %s"), interface_mappings)

    try:
        bridge_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.bridge_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing bridge_mappings failed: %s. "
                      "Agent terminated!"), e)
        sys.exit(1)
    LOG.info(_LI("Bridge mappings: %s"), bridge_mappings)

    manager = LinuxBridgeManager(bridge_mappings, interface_mappings)

    polling_interval = cfg.CONF.AGENT.polling_interval
    quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout
    agent = ca.CommonAgentLoop(manager, polling_interval, quitting_rpc_timeout,
                               constants.AGENT_TYPE_LINUXBRIDGE,
                               LB_AGENT_BINARY)
    LOG.info(_LI("Agent initialized successfully, now running... "))
    launcher = service.launch(cfg.CONF, agent)
    launcher.wait()
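For context, n_utils.parse_mappings turns entries of the form "physical_network:interface" into a dict. The helper below is an illustrative approximation of that behavior (not Neutron's implementation), showing why malformed or duplicate entries surface as the ValueError handled in main():

def parse_mappings_sketch(mapping_list):
    """Rough approximation: each entry is 'key:value'; malformed entries
    and duplicate keys raise ValueError."""
    mappings = {}
    for entry in mapping_list:
        if ':' not in entry:
            raise ValueError("Invalid mapping: '%s'" % entry)
        key, value = entry.split(':', 1)
        key, value = key.strip(), value.strip()
        if key in mappings:
            raise ValueError("Key '%s' in mapping '%s' not unique" % (key, entry))
        mappings[key] = value
    return mappings

# parse_mappings_sketch(['physnet1:eth1', 'physnet2:eth2'])
# -> {'physnet1': 'eth1', 'physnet2': 'eth2'}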
Example No. 2
def remove_empty_bridges():
    try:
        interface_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing physical_interface_mappings failed: %s."), e)
        sys.exit(1)
    LOG.info(_LI("Interface mappings: %s."), interface_mappings)

    try:
        bridge_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.bridge_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing bridge_mappings failed: %s."), e)
        sys.exit(1)
    LOG.info(_LI("Bridge mappings: %s."), bridge_mappings)

    lb_manager = linuxbridge_neutron_agent.LinuxBridgeManager(
        bridge_mappings, interface_mappings)

    bridge_names = lb_manager.get_deletable_bridges()
    for bridge_name in bridge_names:
        if lb_manager.get_tap_devices_count(bridge_name):
            continue

        try:
            lb_manager.delete_bridge(bridge_name)
            LOG.info(_LI("Linux bridge %s deleted"), bridge_name)
        except RuntimeError:
            LOG.exception(_LE("Linux bridge %s delete failed"), bridge_name)
    LOG.info(_LI("Linux bridge cleanup completed successfully"))
Example No. 3
    def ensure_physical_in_bridge(self, network_id,
                                  network_type,
                                  physical_network,
                                  segmentation_id):
        if network_type == p_const.TYPE_VXLAN:
            if self.vxlan_mode == lconst.VXLAN_NONE:
                LOG.error(_LE("Unable to add vxlan interface for network %s"),
                          network_id)
                return
            return self.ensure_vxlan_bridge(network_id, segmentation_id)

        # NOTE(nick-ma-z): Obtain mappings of physical bridge and interfaces
        physical_bridge = self.get_existing_bridge_name(physical_network)
        physical_interface = self.interface_mappings.get(physical_network)
        if not physical_bridge and not physical_interface:
            LOG.error(_LE("No bridge or interface mappings"
                          " for physical network %s"),
                      physical_network)
            return
        if network_type == p_const.TYPE_FLAT:
            return self.ensure_flat_bridge(network_id, physical_bridge,
                                           physical_interface)
        elif network_type == p_const.TYPE_VLAN:
            return self.ensure_vlan_bridge(network_id, physical_bridge,
                                           physical_interface,
                                           segmentation_id)
        else:
            LOG.error(_LE("Unknown network_type %(network_type)s for network "
                          "%(network_id)s."), {network_type: network_type,
                                             network_id: network_id})
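Note that the mapping passed as the logging argument must use string keys matching the %(...)s placeholders, which is what the corrected dict above provides. A minimal standalone illustration using the standard logging module:

import logging

logging.basicConfig(level=logging.ERROR)
LOG = logging.getLogger(__name__)

network_type = 'gre'
network_id = 'net-1234'
# Named placeholders are resolved against a dict with matching string keys.
LOG.error("Unknown network_type %(network_type)s for network %(network_id)s.",
          {'network_type': network_type, 'network_id': network_id})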
Example No. 4
    def _invoke_driver_for_sync_from_plugin(self, ctx, router_info_list, fw):
        """Invoke the delete driver method for status of PENDING_DELETE and
        update method for all other status to (re)apply on driver which is
        Idempotent.
        """
        if fw["status"] == constants.PENDING_DELETE:
            try:
                self.fwaas_driver.delete_firewall(self.conf.agent_mode, router_info_list, fw)
                self.fwplugin_rpc.firewall_deleted(ctx, fw["id"])
            except nexception.FirewallInternalDriverError:
                LOG.error(
                    _LE("Firewall Driver Error on fw state %(fwmsg)s " "for fw: %(fwid)s"),
                    {"fwmsg": fw["status"], "fwid": fw["id"]},
                )
                self.fwplugin_rpc.set_firewall_status(ctx, fw["id"], constants.ERROR)
        else:
            # PENDING_UPDATE, PENDING_CREATE, ...
            try:
                self.fwaas_driver.update_firewall(self.conf.agent_mode, router_info_list, fw)
                if fw["admin_state_up"]:
                    status = constants.ACTIVE
                else:
                    status = constants.DOWN
            except nexception.FirewallInternalDriverError:
                LOG.error(
                    _LE("Firewall Driver Error on fw state %(fwmsg)s " "for fw: %(fwid)s"),
                    {"fwmsg": fw["status"], "fwid": fw["id"]},
                )
                status = constants.ERROR

            self.fwplugin_rpc.set_firewall_status(ctx, fw["id"], status)
Example No. 5
    def handle_subports(self, subports, event_type):
        # Subports are always created with the same trunk_id and there is
        # always at least one item in subports list
        trunk_id = subports[0].trunk_id

        if self.ovsdb_handler.manages_this_trunk(trunk_id):
            if event_type not in (events.CREATED, events.DELETED):
                LOG.error(_LE("Unknown or unimplemented event %s"), event_type)
                return

            ctx = self.ovsdb_handler.context
            try:
                LOG.debug("Event %s for subports: %s", event_type, subports)
                if event_type == events.CREATED:
                    status = self.ovsdb_handler.wire_subports_for_trunk(
                            ctx, trunk_id, subports)
                elif event_type == events.DELETED:
                    subport_ids = [subport.port_id for subport in subports]
                    status = self.ovsdb_handler.unwire_subports_for_trunk(
                        trunk_id, subport_ids)
                self.ovsdb_handler.report_trunk_status(ctx, trunk_id, status)
            except oslo_messaging.MessagingException as e:
                LOG.error(_LE(
                    "Error on event %(event)s for subports "
                    "%(subports)s: %(err)s"),
                    {'event': event_type, 'subports': subports, 'err': e})
Example No. 6
    def delete_addr_and_conntrack_state(self, cidr):
        """Delete an address along with its conntrack state

        This terminates any active connections through an IP.

        :param cidr: the IP address for which state should be removed.
            This can be passed as a string with or without /NN.
            A netaddr.IPAddress or netaddr.Network representing the IP address
            can also be passed.
        """
        self.addr.delete(cidr)

        ip_str = str(netaddr.IPNetwork(cidr).ip)
        ip_wrapper = IPWrapper(namespace=self.namespace)

        # Delete conntrack state for ingress traffic
        # If 0 flow entries have been deleted
        # conntrack -D will return 1
        try:
            ip_wrapper.netns.execute(["conntrack", "-D", "-d", ip_str],
                                     check_exit_code=True,
                                     extra_ok_codes=[1])

        except RuntimeError:
            LOG.exception(_LE("Failed deleting ingress connection state of"
                              " floatingip %s"), ip_str)

        # Delete conntrack state for egress traffic
        try:
            ip_wrapper.netns.execute(["conntrack", "-D", "-q", ip_str],
                                     check_exit_code=True,
                                     extra_ok_codes=[1])
        except RuntimeError:
            LOG.exception(_LE("Failed deleting egress connection state of"
                              " floatingip %s"), ip_str)
Example No. 7
    def unwire_subports_for_trunk(self, trunk_id, subport_ids):
        """Destroy OVS ports associated to the logical subports."""
        ids = []
        for subport_id in subport_ids:
            try:
                self.trunk_manager.remove_sub_port(trunk_id, subport_id)
                ids.append(subport_id)
            except tman.TrunkManagerError as te:
                LOG.error(_LE("Removing subport %(subport_id)s from trunk "
                              "%(trunk_id)s failed: %(err)s"),
                          {'subport_id': subport_id,
                           'trunk_id': trunk_id,
                           'err': te})
        try:
            # OVS bridge and port to be determined by _update_trunk_metadata
            bridge = None
            port = None
            self._update_trunk_metadata(
                bridge, port, trunk_id, subport_ids, wire=False)
        except RuntimeError as e:
            # NOTE(status_police): Trunk bridge has stale metadata now, it
            # might cause troubles during deletion. Signal a DEGRADED status;
            # if the user undo/redo the operation things may go back to
            # normal.
            LOG.error(_LE("Failed to store metadata for trunk %(trunk_id)s: "
                          "%(reason)s"), {'trunk_id': trunk_id, 'reason': e})
            return constants.DEGRADED_STATUS
        except exceptions.ParentPortNotFound as e:
            # If a user deletes/migrates a VM and remove subports from a trunk
            # in short sequence, there is a chance that we hit this spot in
            # that the trunk may still be momentarily bound to the agent. We
            # should not mark the status as DEGRADED in this case.
            LOG.debug(e)

        return self._get_current_status(subport_ids, ids)
Example No. 8
    def _bind_port_level(self, context, level, segments_to_bind):
        binding = context._binding
        port_id = context.current['id']
        LOG.debug("Attempting to bind port %(port)s on host %(host)s "
                  "at level %(level)s using segments %(segments)s",
                  {'port': port_id,
                   'host': context.host,
                   'level': level,
                   'segments': segments_to_bind})

        if level == MAX_BINDING_LEVELS:
            LOG.error(_LE("Exceeded maximum binding levels attempting to bind "
                        "port %(port)s on host %(host)s"),
                      {'port': context.current['id'],
                       'host': context.host})
            return False

        for driver in self.ordered_mech_drivers:
            if not self._check_driver_to_bind(driver, segments_to_bind,
                                              context._binding_levels):
                continue
            try:
                context._prepare_to_bind(segments_to_bind)
                driver.obj.bind_port(context)
                segment = context._new_bound_segment
                if segment:
                    context._push_binding_level(
                        models.PortBindingLevel(port_id=port_id,
                                                host=context.host,
                                                level=level,
                                                driver=driver.name,
                                                segment_id=segment))
                    next_segments = context._next_segments_to_bind
                    if next_segments:
                        # Continue binding another level.
                        if self._bind_port_level(context, level + 1,
                                                 next_segments):
                            return True
                        else:
                            context._pop_binding_level()
                    else:
                        # Binding complete.
                        LOG.debug("Bound port: %(port)s, "
                                  "host: %(host)s, "
                                  "vif_type: %(vif_type)s, "
                                  "vif_details: %(vif_details)s, "
                                  "binding_levels: %(binding_levels)s",
                                  {'port': port_id,
                                   'host': context.host,
                                   'vif_type': binding.vif_type,
                                   'vif_details': binding.vif_details,
                                   'binding_levels': context.binding_levels})
                        return True
            except Exception:
                LOG.exception(_LE("Mechanism driver %s failed in "
                                  "bind_port"),
                              driver.name)
        LOG.error(_LE("Failed to bind port %(port)s on host %(host)s"),
                  {'port': context.current['id'],
                   'host': binding.host})
Example No. 9
    def create_dvr_fip_interfaces(self, ex_gw_port):
        floating_ips = self.get_floating_ips()
        fip_agent_port = self.get_floating_agent_gw_interface(
            ex_gw_port['network_id'])
        if fip_agent_port:
            LOG.debug("FloatingIP agent gateway port received from the "
                "plugin: %s", fip_agent_port)
        is_first = False
        if floating_ips:
            is_first = self.fip_ns.subscribe(ex_gw_port['network_id'])
            if is_first and not fip_agent_port:
                LOG.debug("No FloatingIP agent gateway port possibly due to "
                          "late binding of the private port to the host, "
                          "requesting agent gateway port for 'network-id' :"
                          "%s", ex_gw_port['network_id'])
                fip_agent_port = self.agent.plugin_rpc.get_agent_gateway_port(
                    self.agent.context, ex_gw_port['network_id'])
                if not fip_agent_port:
                    LOG.error(_LE("No FloatingIP agent gateway port "
                                  "returned from server for 'network-id': "
                                  "%s"), ex_gw_port['network_id'])
            if fip_agent_port:
                if 'subnets' not in fip_agent_port:
                    LOG.error(_LE('Missing subnet/agent_gateway_port'))
                else:
                    if is_first:
                        self.fip_ns.create_gateway_port(fip_agent_port)
                    else:
                        self.fip_ns.update_gateway_port(fip_agent_port)

            if (self.fip_ns.agent_gateway_port and
                (self.dist_fip_count == 0)):
                self.fip_ns.create_rtr_2_fip_link(self)
Example No. 10
def destroy_namespace(conf, namespace, force=False):
    """Destroy a given namespace.

    If force is True, then dhcp (if it exists) will be disabled and all
    devices will be forcibly removed.
    """

    try:
        ip = ip_lib.IPWrapper(namespace=namespace)

        if force:
            kill_dhcp(conf, namespace)
            # NOTE: The dhcp driver will remove the namespace if it is empty,
            # so a second check is required here.
            if ip.netns.exists(namespace):
                try:
                    kill_listen_processes(namespace)
                except PidsInNamespaceException:
                    # This is unlikely since, at this point, we have SIGKILLed
                    # all remaining processes but if there are still some, log
                    # the error and continue with the cleanup
                    LOG.error(_LE('Not all processes were killed in %s'),
                              namespace)
                for device in ip.get_devices():
                    unplug_device(conf, device)

        ip.garbage_collect_namespace()
    except Exception:
        LOG.exception(_LE('Error unable to destroy namespace: %s'), namespace)
Example No. 11
 def get_resource_binding_info(self, context, resource_type, resource_id, load_hd_info=True):
     query = context.session.query(ResourceHostingDeviceBinding)
     if load_hd_info:
         query = query.options(orm.joinedload("agent_binding"))
     query = query.filter(
         ResourceHostingDeviceBinding.resource_type == resource_type,
         ResourceHostingDeviceBinding.resource_id == resource_id,
     )
     try:
         return query.one()
     except exc.NoResultFound:
         # This should not happen
         LOG.error(
             _LE("DB inconsistency: No type and hosting info " "associated with %(resource_type)s %(resource_id)s"),
             {"resource_type": resource_type, "resource_id": resource_id},
         )
         raise ResourceBindingInfoError(resource_id=resource_id)
     except exc.MultipleResultsFound:
         # This should not happen either
         LOG.error(
             _LE(
                 "DB inconsistency: Multiple type and hosting info"
                 " associated with %(resource_type)s "
                 "%(resource_id)s"
             ),
             {"resource_type": resource_type, "resource_id": resource_id},
         )
         raise ResourceBindingInfoError(resource_id=resource_id)
Example No. 12
    def _get_default_external_network(self, context):
        """Get the default external network for the deployment."""
        with context.session.begin(subtransactions=True):
            default_external_networks = (
                context.session.query(ext_net_models.ExternalNetwork)
                .filter_by(is_default=sql.true())
                .join(models_v2.Network)
                .join(standard_attr.StandardAttribute)
                .order_by(standard_attr.StandardAttribute.id)
                .all()
            )

        if not default_external_networks:
            LOG.error(
                _LE(
                    "Unable to find default external network "
                    "for deployment, please create/assign one to "
                    "allow auto-allocation to work correctly."
                )
            )
            raise exceptions.AutoAllocationFailure(reason=_("No default router:external network"))
        if len(default_external_networks) > 1:
            LOG.error(
                _LE("Multiple external default networks detected. " "Network %s is true 'default'."),
                default_external_networks[0]["network_id"],
            )
        return default_external_networks[0].network_id
Example No. 13
    def unwire_subports_for_trunk(self, trunk_id, subport_ids):
        """Destroy OVS ports associated to the logical subports."""
        ids = []
        for subport_id in subport_ids:
            try:
                self.trunk_manager.remove_sub_port(trunk_id, subport_id)
                ids.append(subport_id)
            except tman.TrunkManagerError as te:
                LOG.error(_LE("Removing subport %(subport_id)s from trunk "
                              "%(trunk_id)s failed: %(err)s"),
                          {'subport_id': subport_id,
                           'trunk_id': trunk_id,
                           'err': te})
        try:
            # OVS bridge and port to be determined by _update_trunk_metadata
            bridge = None
            port = None
            self._update_trunk_metadata(
                bridge, port, trunk_id, subport_ids, wire=False)
        except RuntimeError:
            # NOTE(status_police): Trunk bridge has stale metadata now, it
            # might cause troubles during deletion. Signal a DEGRADED status;
            # if the user undo/redo the operation things may go back to
            # normal.
            LOG.error(_LE("Failed to store metadata for trunk %s"), trunk_id)
            return constants.DEGRADED_STATUS

        return self._get_current_status(subport_ids, ids)
Example No. 14
    def sync_state(self, networks=None):
        """Sync the local DHCP state with Neutron. If no networks are passed,
        or 'None' is one of the networks, sync all of the networks.
        """
        only_nets = set([] if (not networks or None in networks) else networks)
        LOG.info(_LI('Synchronizing state'))
        pool = eventlet.GreenPool(self.conf.num_sync_threads)
        known_network_ids = set(self.cache.get_network_ids())

        try:
            active_networks = self.plugin_rpc.get_active_networks_info()
            active_network_ids = set(network.id for network in active_networks)
            for deleted_id in known_network_ids - active_network_ids:
                try:
                    self.disable_dhcp_helper(deleted_id)
                except Exception as e:
                    self.schedule_resync(e, deleted_id)
                    LOG.exception(_LE('Unable to sync network state on '
                                      'deleted network %s'), deleted_id)

            for network in active_networks:
                if (not only_nets or  # specifically resync all
                        network.id not in known_network_ids or  # missing net
                        network.id in only_nets):  # specific network to sync
                    pool.spawn(self.safe_configure_dhcp_for_network, network)
            pool.waitall()
            LOG.info(_LI('Synchronizing state complete'))

        except Exception as e:
            if only_nets:
                for network_id in only_nets:
                    self.schedule_resync(e, network_id)
            else:
                self.schedule_resync(e)
            LOG.exception(_LE('Unable to sync network state.'))
Example No. 15
 def _create_port(self, port):
     switchports = port['port']['switchports']
     LOG.debug(_LE("_create_port switch: %s"), port)
     network_id = port['port']['network_id']
     db_context = neutron_context.get_admin_context()
     subnets = db.get_subnets_by_network(db_context, network_id)
     if not subnets:
         LOG.error("Subnet not found for the network")
         self._raise_ml2_error(wexc.HTTPNotFound, 'create_port')
     for switchport in switchports:
         switch_mac_id = switchport['switch_id']
         port_id = switchport['port_id']
         bnp_switch = db.get_bnp_phys_switch_by_mac(db_context,
                                                    switch_mac_id)
         # check for port and switch level existence
         if not bnp_switch:
             LOG.error(_LE("No physical switch found '%s' "), switch_mac_id)
             self._raise_ml2_error(wexc.HTTPNotFound, 'create_port')
         phys_port = db.get_bnp_phys_port(db_context,
                                          bnp_switch.id,
                                          port_id)
         if not phys_port:
             LOG.error(_LE("No physical port found for '%s' "), phys_port)
             self._raise_ml2_error(wexc.HTTPNotFound, 'create_port')
         if bnp_switch.port_prov != hp_const.SWITCH_STATUS['enable']:
             LOG.error(_LE("Physical switch is not Enabled '%s' "),
                       bnp_switch.port_prov)
             self._raise_ml2_error(wexc.HTTPBadRequest, 'create_port')
Example No. 16
 def _dhcp_ready_ports_loop(self):
     """Notifies the server of any ports that had reservations setup."""
     while True:
         # this is just watching a set so we can do it really frequently
         eventlet.sleep(0.1)
         if self.dhcp_ready_ports:
             ports_to_send = self.dhcp_ready_ports
             self.dhcp_ready_ports = set()
             try:
                 self.plugin_rpc.dhcp_ready_on_ports(ports_to_send)
                 continue
             except oslo_messaging.MessagingTimeout:
                 LOG.error(_LE("Timeout notifying server of ports ready. "
                               "Retrying..."))
             except Exception as e:
                 if (isinstance(e, oslo_messaging.RemoteError)
                         and e.exc_type == 'NoSuchMethod'):
                     LOG.info(_LI("Server does not support port ready "
                                  "notifications. Waiting for 5 minutes "
                                  "before retrying."))
                     eventlet.sleep(300)
                     continue
                 LOG.exception(_LE("Failure notifying DHCP server of "
                                   "ready DHCP ports. Will retry on next "
                                   "iteration."))
             self.dhcp_ready_ports |= ports_to_send
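The loop drains dhcp_ready_ports into a local set and merges it back on failure so no notification is lost across retries. A minimal sketch of that drain-and-restore pattern (names are illustrative):

pending = set()

def flush_pending(send):
    """Illustrative only: 'send' is any callable that may raise
    (e.g. an RPC call); failed batches are merged back for retry."""
    global pending
    if not pending:
        return
    to_send, pending = pending, set()
    try:
        send(to_send)
    except Exception:
        # Put the batch back so the next loop iteration retries it.
        pending |= to_send
        raise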
Example No. 17
def main():
    common_config.init(sys.argv[1:])

    common_config.setup_logging()
    try:
        config_parser = SriovNicAgentConfigParser()
        config_parser.parse()
        device_mappings = config_parser.device_mappings
        exclude_devices = config_parser.exclude_devices

    except ValueError:
        LOG.exception(_LE("Failed on Agent configuration parse. "
                          "Agent terminated!"))
        raise SystemExit(1)
    LOG.info(_LI("Physical Devices mappings: %s"), device_mappings)
    LOG.info(_LI("Exclude Devices: %s"), exclude_devices)

    polling_interval = cfg.CONF.AGENT.polling_interval
    try:
        agent = SriovNicSwitchAgent(device_mappings,
                                    exclude_devices,
                                    polling_interval)
    except exc.SriovNicError:
        LOG.exception(_LE("Agent Initialization Failed"))
        raise SystemExit(1)
    # Start everything.
    LOG.info(_LI("Agent initialized successfully, now running... "))
    agent.daemon_loop()
Example No. 18
def load_class_by_alias_or_classname(namespace, name):
    """Load class using stevedore alias or the class name
    :param namespace: namespace where the alias is defined
    :param name: alias or class name of the class to be loaded
    :returns: class if it can be loaded
    :raises ImportError if class cannot be loaded
    """

    if not name:
        LOG.error(_LE("Alias or class name is not set"))
        raise ImportError(_("Class not found."))
    try:
        # Try to resolve class by alias
        mgr = driver.DriverManager(namespace, name)
        class_to_load = mgr.driver
    except RuntimeError:
        e1_info = sys.exc_info()
        # Fallback to class name
        try:
            class_to_load = importutils.import_class(name)
        except (ImportError, ValueError):
            LOG.error(_LE("Error loading class by alias"),
                      exc_info=e1_info)
            LOG.error(_LE("Error loading class by class name"),
                      exc_info=True)
            raise ImportError(_("Class not found."))
    return class_to_load
Example No. 19
    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        self.kwargs['message'] = message

        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass

        for k, v in self.kwargs.items():
            if isinstance(v, Exception):
                self.kwargs[k] = six.text_type(v)

        if self._should_format():
            try:
                message = self.message % kwargs

            except Exception:
                exc_info = sys.exc_info()
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_LE('Exception in string format operation'))
                for name, value in kwargs.items():
                    LOG.error(_LE("%(name)s: %(value)s"),
                              {'name': name, 'value': value})
                if CONF.fatal_exception_format_errors:
                    six.reraise(*exc_info)
                # at least get the core message out if something happened
                message = self.message
        elif isinstance(message, Exception):
            message = six.text_type(message)

        self.msg = message
        super(NFPException, self).__init__(message)
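The constructor formats self.message with the supplied kwargs and falls back to the raw template when formatting fails. A small self-contained sketch of the same pattern, outside the NFP code base:

class SketchException(Exception):
    """Illustrative only; mirrors the kwargs-formatting fallback above."""
    message = "Resource %(resource)s is in state %(state)s"

    def __init__(self, **kwargs):
        try:
            msg = self.message % kwargs
        except KeyError:
            # kwargs did not match the placeholders; keep the raw template
            # instead of failing while raising the exception.
            msg = self.message
        super(SketchException, self).__init__(msg)

# SketchException(resource='fw-1', state='ERROR') formats cleanly;
# SketchException(resource='fw-1') falls back to the unformatted template.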
Example No. 20
 def send_events(self, batched_events):
     LOG.debug("Sending events: %s", batched_events)
     try:
         response = self.nclient.server_external_events.create(
             batched_events)
     except nova_exceptions.NotFound:
         LOG.warning(_LW("Nova returned NotFound for event: %s"),
                     batched_events)
     except Exception:
         LOG.exception(_LE("Failed to notify nova on events: %s"),
                       batched_events)
     else:
         if not isinstance(response, list):
             LOG.error(_LE("Error response returned from nova: %s"),
                       response)
             return
         response_error = False
         for event in response:
             try:
                 code = event['code']
             except KeyError:
                 response_error = True
                 continue
             if code != 200:
                 LOG.warning(_LW("Nova event: %s returned with failed "
                                 "status"), event)
             else:
                 LOG.info(_LI("Nova event response: %s"), event)
         if response_error:
             LOG.error(_LE("Error response returned from nova: %s"),
                       response)
Example No. 21
 def _notify_loop(self, resource, event, trigger, **kwargs):
     """The notification loop."""
     errors = []
     callbacks = list(self._callbacks[resource].get(event, {}).items())
     LOG.debug("Notify callbacks %s for %s, %s",
               callbacks, resource, event)
     # TODO(armax): consider using a GreenPile
     for callback_id, callback in callbacks:
         try:
             callback(resource, event, trigger, **kwargs)
         except Exception as e:
             abortable_event = (
                 event.startswith(events.BEFORE) or
                 event.startswith(events.PRECOMMIT)
             )
             if not abortable_event:
                 LOG.exception(_LE("Error during notification for "
                                   "%(callback)s %(resource)s, %(event)s"),
                               {'callback': callback_id,
                                'resource': resource, 'event': event})
             else:
                 LOG.error(_LE("Callback %(callback)s raised %(error)s"),
                           {'callback': callback_id, 'error': e})
             errors.append(exceptions.NotificationError(callback_id, e))
     return errors
Example No. 22
 def _send_msg(self, msg, reply_cls=None, reply_multi=False):
     timeout_sec = cfg.CONF.OVS.of_request_timeout
     timeout = eventlet.timeout.Timeout(seconds=timeout_sec)
     try:
         result = ofctl_api.send_msg(self._app, msg, reply_cls, reply_multi)
     except ryu_exc.RyuException as e:
         m = _LE("ofctl request %(request)s error %(error)s") % {
             "request": msg,
             "error": e,
         }
         LOG.error(m)
         # NOTE(yamamoto): use RuntimeError for compat with ovs_lib
         raise RuntimeError(m)
     except eventlet.timeout.Timeout as e:
         with excutils.save_and_reraise_exception() as ctx:
             if e is timeout:
                 ctx.reraise = False
                 m = _LE("ofctl request %(request)s timed out") % {
                     "request": msg,
                 }
                 LOG.error(m)
                 # NOTE(yamamoto): use RuntimeError for compat with ovs_lib
                 raise RuntimeError(m)
     finally:
         timeout.cancel()
     LOG.debug("ofctl request %(request)s result %(result)s",
               {"request": msg, "result": result})
     return result
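The try/except/finally around eventlet.timeout.Timeout is the usual way to distinguish this call's timer from a timeout raised elsewhere and to cancel it on every path. A hedged, self-contained sketch (the function name and error message are illustrative):

import eventlet.timeout

def run_with_timeout(func, seconds):
    """Illustrative only: cancel the timer on every path and only convert
    the Timeout instance created here; nested timeouts propagate."""
    timeout = eventlet.timeout.Timeout(seconds=seconds)
    try:
        return func()
    except eventlet.timeout.Timeout as e:
        if e is timeout:
            raise RuntimeError("call timed out after %s seconds" % seconds)
        raise
    finally:
        timeout.cancel()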
Example No. 23
 def _snat_redirect_modify(self, gateway, sn_port, sn_int, is_add):
     """Adds or removes rules and routes for SNAT redirection."""
     try:
         ns_ipr = ip_lib.IPRule(namespace=self.ns_name)
         ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name)
         if is_add:
             ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name)
         for port_fixed_ip in sn_port["fixed_ips"]:
             # Iterate and find the gateway IP address matching
             # the IP version
             port_ip_addr = port_fixed_ip["ip_address"]
             port_ip_vers = netaddr.IPAddress(port_ip_addr).version
             for gw_fixed_ip in gateway["fixed_ips"]:
                 gw_ip_addr = gw_fixed_ip["ip_address"]
                 if netaddr.IPAddress(gw_ip_addr).version == port_ip_vers:
                     sn_port_cidr = common_utils.ip_to_cidr(port_ip_addr, port_fixed_ip["prefixlen"])
                     snat_idx = self._get_snat_idx(sn_port_cidr)
                     if is_add:
                         ns_ipd.route.add_gateway(gw_ip_addr, table=snat_idx)
                         ns_ipr.rule.add(ip=sn_port_cidr, table=snat_idx, priority=snat_idx)
                         ns_ipwrapr.netns.execute(["sysctl", "-w", "net.ipv4.conf.%s.send_redirects=0" % sn_int])
                     else:
                         self._delete_gateway_device_if_exists(ns_ipd, gw_ip_addr, snat_idx)
                         ns_ipr.rule.delete(ip=sn_port_cidr, table=snat_idx, priority=snat_idx)
     except Exception:
         if is_add:
             exc = _LE("DVR: error adding redirection logic")
         else:
             exc = _LE("DVR: snat remove failed to clear the rule " "and device")
         LOG.exception(exc)
Example No. 24
def load_drivers(service_type, plugin):
    """Loads drivers for specific service.

    Passes plugin instance to driver's constructor
    """
    service_type_manager = sdb.ServiceTypeManager.get_instance()
    providers = service_type_manager.get_service_providers(None, filters={"service_type": [service_type]})
    if not providers:
        msg = _LE("No providers specified for '%s' service, exiting") % service_type
        LOG.error(msg)
        raise SystemExit(1)

    drivers = {}
    for provider in providers:
        try:
            drivers[provider["name"]] = importutils.import_object(provider["driver"], plugin)
            LOG.debug(
                "Loaded '%(provider)s' provider for service " "%(service_type)s",
                {"provider": provider["driver"], "service_type": service_type},
            )
        except ImportError:
            with excutils.save_and_reraise_exception():
                LOG.exception(
                    _LE("Error loading provider '%(provider)s' for " "service %(service_type)s"),
                    {"provider": provider["driver"], "service_type": service_type},
                )

    default_provider = None
    try:
        provider = service_type_manager.get_default_service_provider(None, service_type)
        default_provider = provider["name"]
    except pconf.DefaultServiceProviderNotFound:
        LOG.info(_LI("Default provider is not specified for service type %s"), service_type)

    return drivers, default_provider
Example No. 25
    def remove_networks_from_down_agents(self):
        """Remove networks from down DHCP agents if admin state is up.

        Reschedule them if configured so.
        """

        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents("DHCP", agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = ncontext.get_admin_context()
        try:
            down_bindings = (
                context.session.query(NetworkDhcpAgentBinding)
                .join(agents_db.Agent)
                .filter(agents_db.Agent.heartbeat_timestamp < cutoff, agents_db.Agent.admin_state_up)
            )
            dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
            dead_bindings = [b for b in self._filter_bindings(context, down_bindings)]
            agents = self.get_agents_db(context, {"agent_type": [constants.AGENT_TYPE_DHCP]})
            active_agents = [agent for agent in agents if self.is_eligible_agent(context, True, agent)]
            if not active_agents:
                LOG.warning(_LW("No DHCP agents available, " "skipping rescheduling"))
                return
            for binding in dead_bindings:
                LOG.warning(
                    _LW(
                        "Removing network %(network)s from agent "
                        "%(agent)s because the agent did not report "
                        "to the server in the last %(dead_time)s "
                        "seconds."
                    ),
                    {"network": binding.network_id, "agent": binding.dhcp_agent_id, "dead_time": agent_dead_limit},
                )
                # save binding object to avoid ObjectDeletedError
                # in case binding is concurrently deleted from the DB
                saved_binding = {"net": binding.network_id, "agent": binding.dhcp_agent_id}
                try:
                    # do not notify agent if it considered dead
                    # so when it is restarted it won't see network delete
                    # notifications on its queue
                    self.remove_network_from_dhcp_agent(
                        context, binding.dhcp_agent_id, binding.network_id, notify=False
                    )
                except dhcpagentscheduler.NetworkNotHostedByDhcpAgent:
                    # measures against concurrent operation
                    LOG.debug("Network %(net)s already removed from DHCP " "agent %(agent)s", saved_binding)
                    # still continue and allow concurrent scheduling attempt
                except Exception:
                    LOG.exception(
                        _LE("Unexpected exception occurred while " "removing network %(net)s from agent " "%(agent)s"),
                        saved_binding,
                    )

                if cfg.CONF.network_auto_schedule:
                    self._schedule_network(context, saved_binding["net"], dhcp_notifier)
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(_LE("Exception encountered during network " "rescheduling"))
Example No. 26
 def get_plugin(self):
     plugin = manager.NeutronManager.get_service_plugins().get(
         bgp_ext.BGP_EXT_ALIAS)
     if not plugin:
         LOG.error(_LE('No plugin for BGP routing registered'))
         msg = _LE('The resource could not be found.')
         raise webob.exc.HTTPNotFound(msg)
     return plugin
Example No. 27
    def _wire_trunk(self, trunk_br, port, rewire=False):
        """Wire trunk bridge with integration bridge.

        The method calls into trunk manager to create patch ports for trunk and
        patch ports for all subports associated with this trunk. If rewire is
        True, a diff is performed between desired state (the one got from the
        server) and actual state (the patch ports present on the trunk bridge)
        and subports are wired/unwired accordingly.

        :param trunk_br: OVSBridge object representing the trunk bridge.
        :param port: Parent port dict.
        :param rewire: True if local trunk state must be reconciled with
            server's state.
        """
        ctx = self.context
        try:
            parent_port_id = (
                self.trunk_manager.get_port_uuid_from_external_ids(port))
            trunk = self.trunk_rpc.get_trunk_details(ctx, parent_port_id)
        except tman.TrunkManagerError as te:
            LOG.error(_LE("Can't obtain parent port ID from port %s"),
                      port['name'])
            return
        except resources_rpc.ResourceNotFound:
            LOG.error(_LE("Port %s has no trunk associated."), parent_port_id)
            return

        try:
            self.trunk_manager.create_trunk(
                trunk.id, trunk.port_id,
                port['external_ids'].get('attached-mac'))
        except tman.TrunkManagerError as te:
            LOG.error(_LE("Failed to create trunk %(trunk_id)s: %(err)s"),
                      {'trunk_id': trunk.id,
                       'err': te})
            # NOTE(status_police): Trunk couldn't be created so it ends in
            # ERROR status and resync can fix that later.
            self.report_trunk_status(ctx, trunk.id, constants.ERROR_STATUS)
            return

        # We need to remove stale subports
        if rewire:
            old_subport_ids = self.get_connected_subports_for_trunk(trunk.id)
            subports = {p['port_id'] for p in trunk.sub_ports}
            subports_to_delete = set(old_subport_ids) - subports
            if subports_to_delete:
                self.unwire_subports_for_trunk(trunk.id, subports_to_delete)

        # NOTE(status_police): inform the server whether the operation
        # was a partial or complete success. Do not inline status.
        # NOTE: in case of rewiring we readd ports that are already present on
        # the bridge because e.g. the segmentation ID might have changed (e.g.
        # agent crashed, port was removed and readded with a different seg ID)
        status = self.wire_subports_for_trunk(
            ctx, trunk.id, trunk.sub_ports,
            trunk_bridge=trunk_br, parent_port=port)
        self.report_trunk_status(ctx, trunk.id, status)
Example No. 28
    def request(self, method, url, body=None, headers=None):
        """Do http request to xCat server

        Will return (response_status, response_reason, response_body)
        """
        headers = headers or {}
        if body is not None:
            body = jsonutils.dumps(body)
            headers = {'content-type': 'text/plain',
                       'content-length': len(body)}

        _rep_ptn = ''.join(('&password='******'xcat_server': CONF.AGENT.zvm_xcat_server,
                   'method': method,
                   'url': url.replace(_rep_ptn, ''),  # hide password in log
                   'headers': str(headers),
                   'body': body})

        try:
            self.conn.request(method, url, body, headers)
        except Exception as err:
            LOG.error(_LE("Request to xCat server %(host)s failed: %(err)s") %
                      {'host': self.host, 'err': err})
            raise exception.zVMxCatRequestFailed(xcatserver=self.host,
                                                 err=err)

        res = self.conn.getresponse()
        msg = res.read()
        resp = {
            'status': res.status,
            'reason': res.reason,
            'message': msg}

        LOG.debug("xCAT response: %s" % str(resp))

        # NOTE(rui): Currently, only xCAT responses with status 200 or 201
        #            are considered acceptable.
        err = None
        if method == "POST":
            if res.status != 201:
                err = str(resp)
        else:
            if res.status != 200:
                err = str(resp)

        if err is not None:
            LOG.error(_LE("Request to xCat server %(host)s failed: %(err)s") %
                      {'host': self.host, 'err': err})
            raise exception.zVMxCatRequestFailed(xcatserver=self.host,
                                                 err=err)

        return resp
Example No. 29
    def setup(self, network):
        """Create and initialize a device for network's DHCP on this host."""
        port = self.setup_dhcp_port(network)
        self._update_dhcp_port(network, port)
        interface_name = self.get_interface_name(network, port)

        if ip_lib.ensure_device_is_ready(interface_name, namespace=network.namespace):
            LOG.debug("Reusing existing device: %s.", interface_name)
        else:
            try:
                self.driver.plug(
                    network.id,
                    port.id,
                    interface_name,
                    port.mac_address,
                    namespace=network.namespace,
                    mtu=network.get("mtu"),
                )
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Unable to plug DHCP port for " "network %s. Releasing port."), network.id)
                    self.plugin.release_dhcp_port(network.id, port.device_id)

            self.fill_dhcp_udp_checksums(namespace=network.namespace)
        ip_cidrs = []
        for fixed_ip in port.fixed_ips:
            subnet = fixed_ip.subnet
            if not ipv6_utils.is_auto_address_subnet(subnet):
                net = netaddr.IPNetwork(subnet.cidr)
                ip_cidr = "%s/%s" % (fixed_ip.ip_address, net.prefixlen)
                ip_cidrs.append(ip_cidr)

        if self.driver.use_gateway_ips:
            # For each DHCP-enabled subnet, add that subnet's gateway
            # IP address to the Linux device for the DHCP port.
            for subnet in network.subnets:
                if not subnet.enable_dhcp:
                    continue
                gateway = subnet.gateway_ip
                if gateway:
                    net = netaddr.IPNetwork(subnet.cidr)
                    ip_cidrs.append("%s/%s" % (gateway, net.prefixlen))

        if self.conf.enable_isolated_metadata:
            ip_cidrs.append(METADATA_DEFAULT_CIDR)

        self.driver.init_l3(interface_name, ip_cidrs, namespace=network.namespace)

        self._set_default_route(network, interface_name)
        try:
            self._cleanup_stale_devices(network, port)
        except Exception:
            # catch everything as we don't want to fail because of
            # cleanup step
            LOG.error(_LE("Exception during stale dhcp device cleanup"))

        return interface_name
Example No. 30
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""
        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents('L3', agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = n_ctx.get_admin_context()
        down_bindings = (
            context.session.query(RouterL3AgentBinding).
            join(agents_db.Agent).
            filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                   agents_db.Agent.admin_state_up).
            outerjoin(l3_attrs_db.RouterExtraAttributes,
                      l3_attrs_db.RouterExtraAttributes.router_id ==
                      RouterL3AgentBinding.router_id).
            filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                          l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
        try:
            agents_back_online = set()
            for binding in down_bindings:
                if binding.l3_agent_id in agents_back_online:
                    continue
                else:
                    agent = self._get_agent(context, binding.l3_agent_id)
                    if agent.is_active:
                        agents_back_online.add(binding.l3_agent_id)
                        continue

                agent_mode = self._get_agent_mode(binding.l3_agent)
                if agent_mode == constants.L3_AGENT_MODE_DVR:
                    # rescheduling from l3 dvr agent on compute node doesn't
                    # make sense. Router will be removed from that agent once
                    # there are no dvr serviceable ports on that compute node
                    LOG.warn(_LW('L3 DVR agent on node %(host)s is down. '
                                 'Not rescheduling from agent in \'dvr\' '
                                 'mode.'), {'host': binding.l3_agent.host})
                    continue
                LOG.warn(_LW(
                    "Rescheduling router %(router)s from agent %(agent)s "
                    "because the agent did not report to the server in "
                    "the last %(dead_time)s seconds."),
                    {'router': binding.router_id,
                     'agent': binding.l3_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed,
                        oslo_messaging.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"),
                                  binding.router_id)
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(_LE("Exception encountered during router "
                              "rescheduling."))
Example No. 31
 def _watch_process(self, callback, kill_event):
     while not kill_event.ready():
         try:
             output = callback()
             if not output and output != "":
                 break
         except Exception:
             LOG.exception(
                 _LE('An error occurred while communicating '
                     'with async process [%s].'), self.cmd)
             break
         # Ensure that watching a process with lots of output does
         # not block execution of other greenthreads.
         eventlet.sleep()
     # self._is_running being True indicates that the loop was
     # broken out of due to an error in the watched process rather
     # than the loop condition being satisfied.
     if self._is_running:
         self._is_running = False
         self._handle_process_error()
Example No. 32
    def get_dvr_mac_address(self):
        try:
            self.get_dvr_mac_address_with_retry()
        except oslo_messaging.RemoteError as e:
            LOG.warning(
                _LW('L2 agent could not get DVR MAC address at '
                    'startup due to RPC error.  It happens when the '
                    'server does not support this RPC API.  Detailed '
                    'message: %s'), e)
        except oslo_messaging.MessagingTimeout:
            LOG.error(
                _LE('DVR: Failed to obtain a valid local '
                    'DVR MAC address - L2 Agent operating '
                    'in Non-DVR Mode'))

        if not self.in_distributed_mode():
            # switch all traffic using L2 learning
            # REVISIT(yamamoto): why to install the same flow as
            # setup_integration_br?
            self.int_br.install_normal()
Example No. 33
 def run_ofctl(self, cmd, args, process_input=None):
     full_args = ["ovs-ofctl", cmd, self.br_name] + args
     # TODO(kevinbenton): This error handling is really brittle and only
     # detects one specific type of failure. The callers of this need to
     # be refactored to expect errors so we can re-raise and they can
     # take appropriate action based on the type of error.
     for i in range(1, 11):
         try:
             return utils.execute(full_args, run_as_root=True,
                                  process_input=process_input)
         except Exception as e:
             if "failed to connect to socket" in str(e):
                 LOG.debug("Failed to connect to OVS. Retrying "
                           "in 1 second. Attempt: %s/10", i)
                 time.sleep(1)
                 continue
             LOG.error(_LE("Unable to execute %(cmd)s. Exception: "
                           "%(exception)s"),
                       {'cmd': full_args, 'exception': e})
             break
Example No. 34
 def get_connected_subports_for_trunk(self, trunk_id):
     """Return the list of subports present on the trunk bridge."""
     bridge = ovs_lib.OVSBridge(utils.gen_trunk_br_name(trunk_id))
     if not bridge.bridge_exists(bridge.br_name):
         return []
     try:
         ports = bridge.get_ports_attributes(
             'Interface', columns=['name', 'external_ids'])
         return [
             self.trunk_manager.get_port_uuid_from_external_ids(port)
             for port in ports if is_subport(port['name'])
         ]
     except (RuntimeError, tman.TrunkManagerError) as e:
         LOG.error(
             _LE("Failed to get subports for bridge %(bridge)s: "
                 "%(err)s"), {
                     'bridge': bridge.br_name,
                     'err': e
                 })
         return []
Example No. 35
 def _update_nova_inventory(self, event):
     for count in range(MAX_INVENTORY_UPDATE_RETRIES):
         ipv4_inventory = self.p_client.get_inventory(
             event.segment_id, IPV4_RESOURCE_CLASS)
         if event.total:
             ipv4_inventory['total'] += event.total
         if event.reserved:
             ipv4_inventory['reserved'] += event.reserved
         try:
             self.p_client.update_inventory(event.segment_id,
                                            ipv4_inventory,
                                            IPV4_RESOURCE_CLASS)
             return
         except n_exc.PlacementInventoryUpdateConflict:
             LOG.debug(
                 'Re-trying to update Nova IPv4 inventory for '
                 'routed network segment: %s', event.segment_id)
     LOG.error(
         _LE('Failed to update Nova IPv4 inventory for routed '
             'network segment: %s'), event.segment_id)
Example No. 36
    def get_devices_details_list_and_failed_devices(self,
                                                    rpc_context,
                                                    **kwargs):
        devices = []
        failed_devices = []
        cached_networks = {}
        for device in kwargs.pop('devices', []):
            try:
                devices.append(self.get_device_details(
                               rpc_context,
                               device=device,
                               cached_networks=cached_networks,
                               **kwargs))
            except Exception:
                LOG.error(_LE("Failed to get details for device %s"),
                          device)
                failed_devices.append(device)

        return {'devices': devices,
                'failed_devices': failed_devices}
Example No. 37
def check_read_netns():
    required = checks.netns_read_requires_helper()
    if not required and cfg.CONF.AGENT.use_helper_for_ns_read:
        LOG.warning(
            _LW("The user that is executing neutron can read the "
                "namespaces without using the root_helper. Disable "
                "the use_helper_for_ns_read option to avoid a "
                "performance impact."))
        # Don't fail because nothing is actually broken. Just not optimal.
        result = True
    elif required and not cfg.CONF.AGENT.use_helper_for_ns_read:
        LOG.error(
            _LE("The user that is executing neutron does not have "
                "permissions to read the namespaces. Enable the "
                "use_helper_for_ns_read configuration option."))
        result = False
    else:
        # everything is configured appropriately
        result = True
    return result
Example No. 38
    def delete_servicechain_instance(self, context, servicechain_instance_id):
        session = context.session
        with session.begin(subtransactions=True):
            sc_instance = self.get_servicechain_instance(
                                                context,
                                                servicechain_instance_id)
            sc_context = servicechain_context.ServiceChainInstanceContext(
                self, context, sc_instance)
            self.driver_manager.delete_servicechain_instance_precommit(
                sc_context)
            super(ServiceChainPlugin, self).delete_servicechain_instance(
                context, servicechain_instance_id)

        try:
            self.driver_manager.delete_servicechain_instance_postcommit(
                sc_context)
        except Exception:
            LOG.exception(_LE("delete_servicechain_instance_postcommit failed "
                            "for servicechain_instance %s"),
                          servicechain_instance_id)
Example No. 39
    def fetch_and_sync_all_routers(self, context, ns_manager):
        prev_router_ids = set(self.router_info)
        timestamp = timeutils.utcnow()

        try:
            if self.conf.router_id:
                routers = self.plugin_rpc.get_routers(context,
                                                      [self.conf.router_id])
            else:
                routers = self.plugin_rpc.get_routers(context)
        except oslo_messaging.MessagingException:
            LOG.exception(_LE("Failed synchronizing routers due to RPC error"))
            raise n_exc.AbortSyncRouters()
        else:
            LOG.debug('Processing :%r', routers)
            for r in routers:
                ns_manager.keep_router(r['id'])
                if r.get('distributed'):
                    # need to keep fip namespaces as well
                    ext_net_id = (r['external_gateway_info'] or {}).get(
                        'network_id')
                    if ext_net_id:
                        ns_manager.keep_ext_net(ext_net_id)
                update = queue.RouterUpdate(r['id'],
                                            queue.PRIORITY_SYNC_ROUTERS_TASK,
                                            router=r,
                                            timestamp=timestamp)
                self._queue.add(update)
            self.fullsync = False
            LOG.debug("periodic_sync_routers_task successfully completed")

            curr_router_ids = set([r['id'] for r in routers])

            # Delete routers that have disappeared since the last sync
            for router_id in prev_router_ids - curr_router_ids:
                ns_manager.keep_router(router_id)
                update = queue.RouterUpdate(router_id,
                                            queue.PRIORITY_SYNC_ROUTERS_TASK,
                                            timestamp=timestamp,
                                            action=queue.DELETE_ROUTER)
                self._queue.add(update)
Exemplo n.º 40
0
 def call(self, ctxt, method, **kwargs):
     # two methods with the same name in different namespaces should
     # be tracked independently
     if self._original_context.target.namespace:
         scoped_method = '%s.%s' % (self._original_context.target.namespace,
                                    method)
     else:
         scoped_method = method
     # set the timeout from the global method timeout tracker for this
     # method
     self._original_context.timeout = self._METHOD_TIMEOUTS[scoped_method]
     try:
         return self._original_context.call(ctxt, method, **kwargs)
     except oslo_messaging.MessagingTimeout:
         with excutils.save_and_reraise_exception():
             wait = random.uniform(
                 0,
                 min(self._METHOD_TIMEOUTS[scoped_method],
                     TRANSPORT.conf.rpc_response_timeout))
             LOG.error(
                 _LE("Timeout in RPC method %(method)s. Waiting for "
                     "%(wait)s seconds before next attempt. If the "
                     "server is not down, consider increasing the "
                     "rpc_response_timeout option as Neutron "
                     "server(s) may be overloaded and unable to "
                     "respond quickly enough."), {
                         'wait': int(round(wait)),
                         'method': scoped_method
                     })
             new_timeout = min(self._original_context.timeout * 2,
                               self.get_max_timeout())
             if new_timeout > self._METHOD_TIMEOUTS[scoped_method]:
                 LOG.warning(
                     _LW("Increasing timeout for %(method)s calls "
                         "to %(new)s seconds. Restart the agent to "
                         "restore it to the default value."), {
                             'method': scoped_method,
                             'new': new_timeout
                         })
                 self._METHOD_TIMEOUTS[scoped_method] = new_timeout
             time.sleep(wait)
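The snippet above combines three ideas: per-method timeouts kept in a shared map, a randomized wait before retrying, and a doubling of the stored timeout capped at a maximum. Below is a minimal standalone sketch of just that backoff bookkeeping; the names DEFAULT_TIMEOUT, MAX_TIMEOUT and METHOD_TIMEOUTS are illustrative assumptions, not the Neutron identifiers.

# Hypothetical, simplified sketch of the per-method timeout backoff above.
import collections
import random

DEFAULT_TIMEOUT = 60        # assumed starting timeout in seconds
MAX_TIMEOUT = 600           # assumed cap, analogous to get_max_timeout()
METHOD_TIMEOUTS = collections.defaultdict(lambda: DEFAULT_TIMEOUT)

def on_timeout(scoped_method):
    """Return how long to wait before retrying and bump the stored timeout."""
    current = METHOD_TIMEOUTS[scoped_method]
    wait = random.uniform(0, current)
    new_timeout = min(current * 2, MAX_TIMEOUT)
    if new_timeout > current:
        METHOD_TIMEOUTS[scoped_method] = new_timeout
    return wait

print(METHOD_TIMEOUTS['l2.get_devices'])   # 60
on_timeout('l2.get_devices')
print(METHOD_TIMEOUTS['l2.get_devices'])   # 120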
Exemplo n.º 41
0
 def _provision_tenant_private_network(self, context, tenant_id):
     """Create a tenant private network/subnets."""
     network = None
     try:
         network_args = {
             'name': 'auto_allocated_network',
             'admin_state_up': False,
             'tenant_id': tenant_id,
             'shared': False
         }
         network = p_utils.create_network(self.core_plugin, context,
                                          {'network': network_args})
         subnets = []
         for pool in self._get_supported_subnetpools(context):
             subnet_args = {
                 'name': 'auto_allocated_subnet_v%s' % pool['ip_version'],
                 'network_id': network['id'],
                 'tenant_id': tenant_id,
                 'ip_version': pool['ip_version'],
                 'subnetpool_id': pool['id'],
             }
             subnets.append(
                 p_utils.create_subnet(self.core_plugin, context,
                                       {'subnet': subnet_args}))
         return subnets
     except (c_exc.SubnetAllocationError, ValueError, n_exc.BadRequest,
             n_exc.NotFound) as e:
         LOG.error(
             _LE("Unable to auto allocate topology for tenant "
                 "%(tenant_id)s due to missing or unmet "
                 "requirements. Reason: %(reason)s"), {
                     'tenant_id': tenant_id,
                     'reason': e
                 })
         if network:
             self._cleanup(context, network['id'])
         raise exceptions.AutoAllocationFailure(
             reason=_("Unable to provide tenant private network"))
     except Exception as e:
         network_id = network['id'] if network else None
         raise exceptions.UnknownProvisioningError(e, network_id=network_id)
Exemplo n.º 42
0
    def create_servicechain_spec(self, context, servicechain_spec):
        session = context.session
        with session.begin(subtransactions=True):
            result = super(ServiceChainPlugin, self).create_servicechain_spec(
                context, servicechain_spec)
            self._validate_shared_create(context, result, 'servicechain_spec')
            sc_context = servicechain_context.ServiceChainSpecContext(
                self, context, result)
            self.driver_manager.create_servicechain_spec_precommit(
                sc_context)

        try:
            self.driver_manager.create_servicechain_spec_postcommit(sc_context)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("driver_manager.create_servicechain_postcommit "
                              "failed, deleting servicechain_spec %s"),
                          result['id'])
                self.delete_servicechain_spec(context, result['id'])

        return result
Exemplo n.º 43
0
 def request_endpoint_details(self, context, **kwargs):
     LOG.debug("APIC AIM MD handling get_endpoint_details for: %s", kwargs)
     try:
         request = kwargs.get('request')
         result = {
             'device': request['device'],
             'timestamp': request['timestamp'],
             'request_id': request['request_id'],
             'gbp_details': None,
             'neutron_details': None
         }
         result['gbp_details'] = self._get_gbp_details(context, request)
         result['neutron_details'] = ml2_rpc.RpcCallbacks(
             None, None).get_device_details(context, **request)
         return result
     except Exception as e:
         LOG.error(
             _LE("An exception has occurred while requesting device "
                 "gbp details for %s"), request.get('device'))
         LOG.exception(e)
         return None
Exemplo n.º 44
0
 def _extract(self, resource_type, resource_id, field):
     # NOTE(salv-orlando): This check currently assumes the parent
     # resource is handled by the core plugin. It might be worth
     # having a way to map resources to plugins so to make this
     # check more general
     f = getattr(directory.get_plugin(), 'get_%s' % resource_type)
     # f *must* exist, if not found it is better to let neutron
     # explode. Check will be performed with admin context
     try:
         data = f(context.get_admin_context(), resource_id, fields=[field])
     except exceptions.NotFound as e:
         # NOTE(kevinbenton): a NotFound exception can occur if a
         # list operation is happening at the same time as one of
         # the parents and its children being deleted. So we issue
         # a RetryRequest so the API will redo the lookup and the
         # problem items will be gone.
         raise db_exc.RetryRequest(e)
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.exception(_LE('Policy check error while calling %s!'), f)
     return data[field]
Exemplo n.º 45
0
    def get_vf_state(self, vf_index):
        """Get vf state {True/False}

        @param vf_index: vf index
        @todo: Handle "auto" state
        """
        try:
            out = self._as_root([], "link", ("show", self.dev_name))
        except Exception as e:
            LOG.exception(_LE("Failed executing ip command"))
            raise exc.IpCommandDeviceError(dev_name=self.dev_name,
                                           reason=e)
        vf_lines = self._get_vf_link_show([vf_index], out)
        if vf_lines:
            vf_details = self._parse_vf_link_show(vf_lines[0])
            if vf_details:
                state = vf_details.get("link-state",
                                       self.LinkState.DISABLE)
                if state != self.LinkState.DISABLE:
                    return True
        return False
Exemplo n.º 46
0
def vf_management_supported():
    is_supported = True
    required_caps = (
        ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
        ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_SPOOFCHK,
        ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE)
    try:
        vf_section = ip_link_support.IpLinkSupport.get_vf_mgmt_section()
        for cap in required_caps:
            if not ip_link_support.IpLinkSupport.vf_mgmt_capability_supported(
                    vf_section, cap):
                is_supported = False
                LOG.debug(
                    "ip link command does not support "
                    "vf capability '%(cap)s'", {'cap': cap})
    except ip_link_support.UnsupportedIpLinkCommand:
        LOG.exception(
            _LE("Unexpected exception while checking supported "
                "ip link command"))
        return False
    return is_supported
Exemplo n.º 47
0
File: db.py Project: zl2017/neutron
 def _build_topology(self, context, tenant_id, default_external_network):
     """Build the network topology and returns its network UUID."""
     try:
         subnets = self._provision_tenant_private_network(
             context, tenant_id)
         network_id = subnets[0]['network_id']
         router = self._provision_external_connectivity(
             context, default_external_network, subnets, tenant_id)
         network_id = self._save(
             context, tenant_id, network_id, router['id'], subnets)
         return network_id
     except exceptions.UnknownProvisioningError as e:
         # Clean partially provisioned topologies, and reraise the
         # error. If it can be retried, so be it.
         LOG.error(_LE("Unknown error while provisioning topology for "
                       "tenant %(tenant_id)s. Reason: %(reason)s"),
                   {'tenant_id': tenant_id, 'reason': e})
         self._cleanup(
             context, network_id=e.network_id,
             router_id=e.router_id, subnets=e.subnets)
         raise e.error
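The pattern above hinges on an exception that carries enough partial state (network, router, subnets) for the caller to roll back whatever was provisioned before the failure and then re-raise the underlying error. A hedged, self-contained sketch of that idea follows; the class and function names are illustrative, not the Neutron ones.

# Illustrative sketch: an exception that carries partial provisioning state.
class ProvisioningError(Exception):
    def __init__(self, error, network_id=None, router_id=None, subnets=None):
        super().__init__(str(error))
        self.error = error
        self.network_id = network_id
        self.router_id = router_id
        self.subnets = subnets or []

def build_topology(provision_network, provision_router, cleanup):
    network_id = None
    try:
        network_id = provision_network()
        router_id = provision_router(network_id)
        return network_id, router_id
    except ProvisioningError as e:
        # Clean up only what the exception says was created, then re-raise
        # the original error so callers can decide whether to retry.
        cleanup(network_id=e.network_id, router_id=e.router_id,
                subnets=e.subnets)
        raise e.error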
Exemplo n.º 48
0
 def dispose_trunk(self, trunk_bridge):
     """Clean up all the OVS resources associated to trunk_bridge."""
     ovsdb = trunk_bridge.ovsdb
     patch_peers = []
     try:
         patch_peers = trunk_bridge.get_ports_attributes(
             'Interface', columns=['options'])
         with trunk_bridge.ovsdb.transaction() as txn:
             for patch_peer in patch_peers:
                 peer_name = patch_peer['options'].get('peer')
                 if peer_name:
                     txn.add(ovsdb.del_port(peer_name, self.br_int.br_name))
             txn.add(ovsdb.del_br(trunk_bridge.br_name))
         LOG.debug("Deleted bridge '%s' and patch peers '%s'.",
                   trunk_bridge.br_name, patch_peers)
     except RuntimeError as e:
         LOG.error(_LE("Could not delete '%(peers)s' associated to "
                       "trunk bridge %(name)s. Reason: %(reason)s."),
                   {'peers': patch_peers,
                    'name': trunk_bridge.br_name,
                    'reason': e})
Exemplo n.º 49
0
    def _wait_for_network_function_delete_completion(self, context,
                                                     network_function_id):
        time_waited = 0
        network_function = None
        while time_waited < cfg.CONF.nfp_node_driver.service_delete_timeout:
            network_function = self.nfp_notifier.get_network_function(
                context.plugin_context, network_function_id)
            if not network_function:
                break
            eventlet.sleep(5)
            time_waited = time_waited + 5

        self._delete_node_instance_network_function_map(
            context.plugin_session, context.current_node['id'],
            context.instance['id'])

        if network_function:
            LOG.error(
                _LE("Delete network function %(network_function)s "
                    "failed"), {'network_function': network_function_id})
            raise NodeInstanceDeleteFailed()
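Above, the driver polls the notifier until the network function disappears or a configured timeout elapses. A generic, hedged version of that wait loop is sketched below; the five-second step and the fetch callable are assumptions for illustration.

# Generic poll-until-gone helper, sketching the wait loop used above.
import time

def wait_for_deletion(fetch, timeout, interval=5):
    """Poll fetch() until it returns None or timeout seconds have passed.

    Returns True if the object disappeared, False on timeout.
    """
    waited = 0
    while waited < timeout:
        if fetch() is None:
            return True
        time.sleep(interval)
        waited += interval
    return fetch() is None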
Exemplo n.º 50
0
def destroy_namespace(conf, namespace, force=False):
    """Destroy a given namespace.

    If force is True, then dhcp (if it exists) will be disabled and all
    devices will be forcibly removed.
    """

    try:
        ip = ip_lib.IPWrapper(namespace=namespace)

        if force:
            kill_dhcp(conf, namespace)
            # NOTE: The dhcp driver will remove the namespace if is it empty,
            # so a second check is required here.
            if ip.netns.exists(namespace):
                for device in ip.get_devices(exclude_loopback=True):
                    unplug_device(conf, device)

        ip.garbage_collect_namespace()
    except Exception:
        LOG.exception(_LE('Error unable to destroy namespace: %s'), namespace)
Exemplo n.º 51
0
    def is_macvtap_assigned(cls, ifname):
        """Check if vf has macvtap interface assigned

        Parses the output of ip link show command and checks
        if macvtap[0-9]+@<vf interface> regex matches the
        output.
        @param ifname: vf interface name
        @return: True on match otherwise False
        """
        try:
            out = cls._execute([], "link", ("show", ), run_as_root=True)
        except Exception as e:
            LOG.error(_LE("Failed executing ip command: %s"), e)
            raise exc.IpCommandError(reason=e)

        for line in out.splitlines():
            pattern_match = cls.MACVTAP_REG_EX.match(line)
            if pattern_match:
                if ifname == pattern_match.group('vf_interface'):
                    return True
        return False
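The method relies on a class-level regular expression (not shown in this excerpt) that recognizes macvtapN@<vf interface> entries in ip link show output. The standalone sketch below shows one plausible form of that check; the exact MACVTAP_REG_EX pattern here is an assumption, not the one from the Neutron source.

# Assumed regex for lines such as:
#   "21: macvtap0@enp1s0f1: <BROADCAST,MULTICAST,UP> mtu 1500 ..."
import re

MACVTAP_REG_EX = re.compile(r"macvtap\d+@(?P<vf_interface>[^:\s]+)")

def is_macvtap_assigned(ifname, ip_link_output):
    for line in ip_link_output.splitlines():
        match = MACVTAP_REG_EX.search(line)
        if match and match.group('vf_interface') == ifname:
            return True
    return False

sample = "21: macvtap0@enp1s0f1: <BROADCAST,MULTICAST,UP> mtu 1500"
print(is_macvtap_assigned('enp1s0f1', sample))   # True
print(is_macvtap_assigned('enp1s0f0', sample))   # False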
Exemplo n.º 52
0
    def get_assigned_macs(self, vf_list):
        """Get assigned mac addresses for vf list.

        @param vf_list: list of vf indexes
        @return: dict mapping of vf to mac
        """
        try:
            out = self._as_root([], "link", ("show", self.dev_name))
        except Exception as e:
            LOG.exception(_LE("Failed executing ip command"))
            raise exc.IpCommandDeviceError(dev_name=self.dev_name, reason=e)
        vf_to_mac_mapping = {}
        vf_lines = self._get_vf_link_show(vf_list, out)
        if vf_lines:
            for vf_line in vf_lines:
                vf_details = self._parse_vf_link_show(vf_line)
                if vf_details:
                    vf_num = vf_details.get('vf')
                    vf_mac = vf_details.get("MAC")
                    vf_to_mac_mapping[vf_num] = vf_mac
        return vf_to_mac_mapping
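Both get_vf_state earlier and get_assigned_macs above funnel ip link show output through _get_vf_link_show and _parse_vf_link_show, which are not part of this excerpt. The rough sketch below shows what such parsing can look like for a line of the form "vf 0 MAC fa:16:3e:aa:bb:cc, ... link-state auto"; the field names and line format are assumptions.

# Hedged sketch of parsing a single "vf N MAC xx:xx ..." line into a dict.
def parse_vf_link_show(line):
    details = {}
    fields = [f.strip() for f in line.split(',')]
    first = fields[0].split()          # e.g. ['vf', '0', 'MAC', 'fa:16:...']
    if len(first) >= 4 and first[0] == 'vf':
        details['vf'] = int(first[1])
        details['MAC'] = first[3]
    for field in fields[1:]:
        if field.startswith('link-state'):
            details['link-state'] = field.split()[-1]
    return details

line = "vf 0 MAC fa:16:3e:aa:bb:cc, spoof checking on, link-state auto"
print(parse_vf_link_show(line))
# {'vf': 0, 'MAC': 'fa:16:3e:aa:bb:cc', 'link-state': 'auto'}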
Exemplo n.º 53
0
    def network_delete(self, context, **kwargs):
        LOG.debug("network_delete received")
        network_id = kwargs.get('network_id')

        if network_id not in self.network_map:
            LOG.error(_LE("Network %s is not available."), network_id)
            return

        segment = self.network_map.get(network_id)
        if segment and segment.network_type == p_constants.TYPE_VLAN:
            if_mappings = self.agent.mgr.interface_mappings
            vlan_device_name = macvtap_common.get_vlan_device_name(
                if_mappings[segment.physical_network],
                str(segment.segmentation_id))
            ip_dev = ip_lib.IPDevice(vlan_device_name)
            if ip_dev.exists():
                LOG.debug("Delete %s", ip_dev.name)
                ip_dev.link.delete()
            else:
                LOG.debug("Cannot delete vlan device %s; it does not exist",
                          vlan_device_name)
Exemplo n.º 54
0
 def extend_network_dict_provider(self, context, network):
     id = network['id']
     segments = db.get_network_segments(context.session, id)
     if not segments:
         LOG.error(_LE("Network %s has no segments"), id)
         for attr in provider.ATTRIBUTES:
             network[attr] = None
     elif len(segments) > 1:
         network[mpnet.SEGMENTS] = [{
             provider.NETWORK_TYPE:
             segment[api.NETWORK_TYPE],
             provider.PHYSICAL_NETWORK:
             segment[api.PHYSICAL_NETWORK],
             provider.SEGMENTATION_ID:
             segment[api.SEGMENTATION_ID]
         } for segment in segments]
     else:
         segment = segments[0]
         network[provider.NETWORK_TYPE] = segment[api.NETWORK_TYPE]
         network[provider.PHYSICAL_NETWORK] = segment[api.PHYSICAL_NETWORK]
         network[provider.SEGMENTATION_ID] = segment[api.SEGMENTATION_ID]
Exemplo n.º 55
0
File: db.py Project: zl2017/neutron
def partial_port_ids_to_full_ids(context, partial_ids):
    """Takes a list of the start of port IDs and returns full IDs.

    Returns dictionary of partial IDs to full IDs if a single match
    is found.
    """
    result = {}
    to_full_query = (context.session.query(models_v2.Port.id).filter(
        or_(*[models_v2.Port.id.startswith(p) for p in partial_ids])))
    candidates = [match[0] for match in to_full_query]
    for partial_id in partial_ids:
        matching = [c for c in candidates if c.startswith(partial_id)]
        if len(matching) == 1:
            result[partial_id] = matching[0]
            continue
        if len(matching) < 1:
            LOG.info("No ports have port_id starting with %s", partial_id)
        elif len(matching) > 1:
            LOG.error(_LE("Multiple ports have port_id starting with %s"),
                      partial_id)
    return result
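Stripped of the SQLAlchemy query, the core of the function above is a prefix match that only accepts unambiguous results. The self-contained sketch below mirrors that logic against an in-memory list of IDs; the sample values are made up.

# Prefix matching with an ambiguity check, mirroring the logic above
# against a plain list instead of a Port query.
def partial_ids_to_full_ids(candidates, partial_ids):
    result = {}
    for partial_id in partial_ids:
        matching = [c for c in candidates if c.startswith(partial_id)]
        if len(matching) == 1:
            result[partial_id] = matching[0]
    return result

ports = ['1a2b3c4d-aaaa', '1a2b9999-bbbb', '7f00aaaa-cccc']
print(partial_ids_to_full_ids(ports, ['1a2b3c', '1a2b', '7f00']))
# {'1a2b3c': '1a2b3c4d-aaaa', '7f00': '7f00aaaa-cccc'}  ('1a2b' is ambiguous)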
Exemplo n.º 56
0
 def validate_vxlan_group_with_local_ip(self):
     if not cfg.CONF.VXLAN.vxlan_group:
         return
     try:
         ip_addr = netaddr.IPAddress(self.local_ip)
         # Ensure the configured group address/range is valid and multicast
         group_net = netaddr.IPNetwork(cfg.CONF.VXLAN.vxlan_group)
         if not group_net.is_multicast():
             raise ValueError()
         if not ip_addr.version == group_net.version:
             raise ValueError()
     except (netaddr.core.AddrFormatError, ValueError):
         LOG.error(
             _LE("Invalid VXLAN Group: %(group)s, must be an address "
                 "or network (in CIDR notation) in a multicast "
                 "range of the same address family as local_ip: "
                 "%(ip)s"), {
                     'group': cfg.CONF.VXLAN.vxlan_group,
                     'ip': self.local_ip
                 })
         sys.exit(1)
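The same multicast-and-address-family check can be expressed with the standard library's ipaddress module. The sketch below mirrors the validation above but is only an illustration: the agent code uses netaddr and reads oslo.config options rather than plain arguments.

# Standalone sketch of the VXLAN group validation using the stdlib.
import ipaddress

def vxlan_group_is_valid(group, local_ip):
    if not group:
        return True          # an unset group is accepted, as above
    try:
        addr = ipaddress.ip_address(local_ip)
        net = ipaddress.ip_network(group, strict=False)
        return net.is_multicast and addr.version == net.version
    except ValueError:
        return False

print(vxlan_group_is_valid('224.0.0.0/4', '192.0.2.10'))   # True
print(vxlan_group_is_valid('10.0.0.0/24', '192.0.2.10'))   # False (not multicast)
print(vxlan_group_is_valid('ff05::/16', '192.0.2.10'))     # False (family mismatch)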
Exemplo n.º 57
0
 def get_snat_port_for_internal_port(self, int_port, snat_ports=None):
     """Return the SNAT port for the given internal interface port."""
     if snat_ports is None:
         snat_ports = self.get_snat_interfaces()
     fixed_ip = int_port['fixed_ips'][0]
     subnet_id = fixed_ip['subnet_id']
     if snat_ports:
         match_port = [
             p for p in snat_ports
             if p['fixed_ips'][0]['subnet_id'] == subnet_id
         ]
         if match_port:
             return match_port[0]
         else:
             LOG.error(
                 _LE('DVR: SNAT port not found in the list '
                     '%(snat_list)s for the given router '
                     ' internal port %(int_p)s'), {
                         'snat_list': snat_ports,
                         'int_p': int_port
                     })
Exemplo n.º 58
0
    def _is_session_semantic_violated(context, resource, event):
        """Return True and print an ugly error on transaction violation.

        This code is to print ugly errors when AFTER_CREATE/UPDATE
        event transaction semantics are violated by other parts of
        the code.
        """
        if not context.session.is_active:
            return False
        stack = traceback.extract_stack()
        stack = "".join(traceback.format_list(stack))
        LOG.error(
            _LE("This handler is supposed to handle AFTER "
                "events, as in 'AFTER it's committed', "
                "not BEFORE. Offending resource event: "
                "%(r)s, %(e)s. Location:\n%(l)s"), {
                    'r': resource,
                    'e': event,
                    'l': stack
                })
        return True
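The only non-obvious part above is capturing and formatting the current call stack without raising an exception. A tiny hedged demonstration of that traceback usage:

# Demonstrates formatting the current stack as a string, as done above.
import traceback

def where_am_i():
    stack = traceback.extract_stack()
    return "".join(traceback.format_list(stack))

print(where_am_i())   # one "File ..., line ..., in ..." entry per frame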
Exemplo n.º 59
0
def provisioning_complete(context, object_id, object_type, entity):
    """Mark that the provisioning for object_id has been completed by entity.

    Marks that an entity has finished provisioning an object. If there are
    no remaining provisioning components, a callback will be triggered
    indicating that provisioning has been completed for the object. Subscribers
    to this callback must be idempotent because it may be called multiple
    times in high availability deployments.

    :param context: neutron api request context
    :param object_id: ID of object that has been provisioned
    :param object_type: callback resource type of the object
    :param entity: The entity that has provisioned the object
    """
    log_dict = {'oid': object_id, 'entity': entity, 'otype': object_type}
    # this can't be called in a transaction to avoid REPEATABLE READ
    # tricking us into thinking there are remaining provisioning components
    if context.session.is_active:
        raise RuntimeError(_LE("Must not be called in a transaction"))
    standard_attr_id = _get_standard_attr_id(context, object_id, object_type)
    if not standard_attr_id:
        return
    if remove_provisioning_component(context, object_id, object_type, entity,
                                     standard_attr_id):
        LOG.debug(
            "Provisioning for %(otype)s %(oid)s completed by entity "
            "%(entity)s.", log_dict)
    # now with that committed, check if any records are left. if None, emit
    # an event that provisioning is complete.
    records = context.session.query(pb_model.ProvisioningBlock).filter_by(
        standard_attr_id=standard_attr_id).count()
    if not records:
        LOG.debug(
            "Provisioning complete for %(otype)s %(oid)s triggered by "
            "entity %(entity)s.", log_dict)
        registry.notify(object_type,
                        PROVISIONING_COMPLETE,
                        'neutron.db.provisioning_blocks',
                        context=context,
                        object_id=object_id)
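Ignoring the database models and the callback registry, the provisioning-block pattern boils down to a per-object set of outstanding entities and a notification that fires only when that set empties. A hedged in-memory sketch of that contract follows; the class and callback names are illustrative.

# In-memory sketch of provisioning blocks: add components, complete them,
# and fire a callback once nothing is outstanding.
import collections

class ProvisioningTracker:
    def __init__(self, on_complete):
        self._blocks = collections.defaultdict(set)
        self._on_complete = on_complete

    def add_block(self, object_id, entity):
        self._blocks[object_id].add(entity)

    def provisioning_complete(self, object_id, entity):
        self._blocks[object_id].discard(entity)
        if not self._blocks[object_id]:
            del self._blocks[object_id]
            # Callbacks must be idempotent, as the docstring above warns.
            self._on_complete(object_id)

tracker = ProvisioningTracker(lambda oid: print("port %s is ready" % oid))
tracker.add_block('port-1', 'DHCP')
tracker.add_block('port-1', 'L2')
tracker.provisioning_complete('port-1', 'DHCP')   # nothing printed yet
tracker.provisioning_complete('port-1', 'L2')     # "port port-1 is ready"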
Exemplo n.º 60
0
    def daemon_loop(self):
        LOG.info(_LI("%s Agent RPC Daemon Started!"), self.agent_type)
        device_info = None
        sync = True

        while True:
            start = time.time()

            if self.fullsync:
                sync = True
                self.fullsync = False

            if sync:
                LOG.info(_LI("%s Agent out of sync with plugin!"),
                         self.agent_type)

            device_info = self.scan_devices(previous=device_info, sync=sync)
            sync = False

            if (self._device_info_has_changes(device_info)
                    or self.sg_agent.firewall_refresh_needed()):
                LOG.debug("Agent loop found changes! %s", device_info)
                try:
                    sync = self.process_network_devices(device_info)
                except Exception:
                    LOG.exception(_LE("Error in agent loop. Devices info: %s"),
                                  device_info)
                    sync = True

            # sleep till end of polling interval
            elapsed = (time.time() - start)
            if (elapsed < self.polling_interval):
                time.sleep(self.polling_interval - elapsed)
            else:
                LOG.debug(
                    "Loop iteration exceeded interval "
                    "(%(polling_interval)s vs. %(elapsed)s)!", {
                        'polling_interval': self.polling_interval,
                        'elapsed': elapsed
                    })
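The loop above keeps a roughly constant cadence by sleeping only for the remainder of the polling interval. A minimal standalone version of that timing pattern, with a placeholder work function and a bounded number of iterations so it terminates:

# Minimal sketch of the fixed-cadence polling used by daemon_loop above.
import time

def run_polling_loop(do_work, polling_interval, iterations=3):
    for _ in range(iterations):
        start = time.time()
        do_work()
        elapsed = time.time() - start
        if elapsed < polling_interval:
            time.sleep(polling_interval - elapsed)
        else:
            print("iteration exceeded interval (%.2fs vs %.2fs)"
                  % (polling_interval, elapsed))

run_polling_loop(lambda: time.sleep(0.1), polling_interval=0.5)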