Example #1
 def treat_devices_removed(self, devices):
     resync = False
     self.sg_agent.remove_devices_filter(devices)
     for device in devices:
         LOG.info(_LI("Attachment %s removed"), device)
         details = None
         try:
             details = self.plugin_rpc.update_device_down(self.context,
                                                          device,
                                                          self.agent_id,
                                                          cfg.CONF.host)
         except Exception:
             LOG.exception(_LE("Error occurred while removing port %s"),
                           device)
             resync = True
         if details and details['exists']:
             LOG.info(_LI("Port %s updated."), device)
         else:
             LOG.debug("Device %s not defined on plugin", device)
         port_id = self._clean_network_ports(device)
         self.ext_manager.delete_port(self.context,
                                      {'device': device,
                                       'port_id': port_id})
         registry.notify(local_resources.PORT_DEVICE, events.AFTER_DELETE,
                         self, context=self.context, device=device,
                         port_id=port_id)
     if self.prevent_arp_spoofing:
         self.mgr.delete_arp_spoofing_protection(devices)
     return resync
Example #2
 def _schedule_network(self, context, network_id, dhcp_notifier):
     LOG.info(_LI("Scheduling unhosted network %s"), network_id)
     try:
         # TODO(enikanorov): have to issue redundant db query
         # to satisfy scheduling interface
         network = self.get_network(context, network_id)
         agents = self.schedule_network(context, network)
         if not agents:
             LOG.info(_LI("Failed to schedule network %s, "
                          "no eligible agents or it might be "
                          "already scheduled by another server"),
                      network_id)
             return
         if not dhcp_notifier:
             return
         for agent in agents:
             LOG.info(_LI("Adding network %(net)s to agent "
                          "%(agent)s on host %(host)s"),
                      {'net': network_id,
                       'agent': agent.id,
                       'host': agent.host})
             dhcp_notifier.network_added_to_agent(
                 context, network_id, agent.host)
     except Exception:
         # catching any exception during scheduling
         # so if _schedule_network is invoked in the loop it could
         # continue in any case
         LOG.exception(_LE("Failed to schedule network %s"), network_id)
Example #3
    def refresh_firewall(self, device_ids=None):
        LOG.info(_LI("Refresh firewall rules"))
        if not device_ids:
            device_ids = self.firewall.ports.keys()
            if not device_ids:
                LOG.info(_LI("No ports here to refresh firewall"))
                return
        if self.use_enhanced_rpc:
            devices_info = self.plugin_rpc.security_group_info_for_devices(
                self.context, device_ids)
            devices = devices_info['devices']
            security_groups = devices_info['security_groups']
            security_group_member_ips = devices_info['sg_member_ips']
        else:
            devices = self.plugin_rpc.security_group_rules_for_devices(
                self.context, device_ids)

        with self.firewall.defer_apply():
            if self.use_enhanced_rpc:
                LOG.debug("Update security group information for ports %s",
                          devices.keys())
                self._update_security_group_info(
                    security_groups, security_group_member_ips)
            for device in devices.values():
                LOG.debug("Update port filter for %s", device['device'])
                self.firewall.update_port_filter(device)
Example #4
    def treat_devices_added_updated(self, devices_info):
        try:
            macs_list = set([device_info[0] for device_info in devices_info])
            devices_details_list = self.plugin_rpc.get_devices_details_list(
                self.context, macs_list, self.agent_id)
        except Exception as e:
            LOG.debug("Unable to get port details for devices "
                      "with MAC addresses %(devices)s: %(e)s",
                      {'devices': macs_list, 'e': e})
            # resync is needed
            return True

        for device_details in devices_details_list:
            device = device_details['device']
            LOG.debug("Port with MAC address %s is added", device)

            if 'port_id' in device_details:
                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
                         {'device': device, 'details': device_details})
                port_id = device_details['port_id']
                profile = device_details['profile']
                spoofcheck = device_details.get('port_security_enabled', True)
                self.treat_device(device,
                                  profile.get('pci_slot'),
                                  device_details['admin_state_up'],
                                  spoofcheck)
                self._update_network_ports(device_details['network_id'],
                                           port_id,
                                           (device, profile.get('pci_slot')))
                self.ext_manager.handle_port(self.context, device_details)
            else:
                LOG.info(_LI("Device with MAC %s not defined on plugin"),
                         device)
        return False
Example #5
def main():
    common_config.init(sys.argv[1:])

    common_config.setup_logging()
    try:
        config_parser = SriovNicAgentConfigParser()
        config_parser.parse()
        device_mappings = config_parser.device_mappings
        exclude_devices = config_parser.exclude_devices

    except ValueError:
        LOG.exception(_LE("Failed on Agent configuration parse. "
                          "Agent terminated!"))
        raise SystemExit(1)
    LOG.info(_LI("Physical Devices mappings: %s"), device_mappings)
    LOG.info(_LI("Exclude Devices: %s"), exclude_devices)

    polling_interval = cfg.CONF.AGENT.polling_interval
    try:
        agent = SriovNicSwitchAgent(device_mappings,
                                    exclude_devices,
                                    polling_interval)
    except exc.SriovNicError:
        LOG.exception(_LE("Agent Initialization Failed"))
        raise SystemExit(1)
    # Start everything.
    LOG.info(_LI("Agent initialized successfully, now running... "))
    agent.daemon_loop()
Example #6
def main():
    common_config.init(sys.argv[1:])

    common_config.setup_logging()
    try:
        interface_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing physical_interface_mappings failed: %s. "
                      "Agent terminated!"), e)
        sys.exit(1)
    LOG.info(_LI("Interface mappings: %s"), interface_mappings)

    try:
        bridge_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.bridge_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing bridge_mappings failed: %s. "
                      "Agent terminated!"), e)
        sys.exit(1)
    LOG.info(_LI("Bridge mappings: %s"), bridge_mappings)

    manager = LinuxBridgeManager(bridge_mappings, interface_mappings)

    polling_interval = cfg.CONF.AGENT.polling_interval
    quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout
    agent = ca.CommonAgentLoop(manager, polling_interval, quitting_rpc_timeout,
                               constants.AGENT_TYPE_LINUXBRIDGE,
                               LB_AGENT_BINARY)
    LOG.info(_LI("Agent initialized successfully, now running... "))
    launcher = service.launch(cfg.CONF, agent)
    launcher.wait()
Example #7
    def treat_device(self, device, pci_slot, admin_state_up, spoofcheck=True):
        if self.eswitch_mgr.device_exists(device, pci_slot):
            try:
                self.eswitch_mgr.set_device_spoofcheck(device, pci_slot,
                                                       spoofcheck)
            except Exception:
                LOG.warning(_LW("Failed to set spoofcheck for device %s"),
                            device)
            LOG.info(_LI("Device %(device)s spoofcheck %(spoofcheck)s"),
                     {"device": device, "spoofcheck": spoofcheck})

            try:
                self.eswitch_mgr.set_device_state(device, pci_slot,
                                                  admin_state_up)
            except exc.IpCommandOperationNotSupportedError:
                LOG.warning(_LW("Device %s does not support state change"),
                            device)
            except exc.SriovNicError:
                LOG.warning(_LW("Failed to set device %s state"), device)
                return
            if admin_state_up:
                # update plugin about port status
                self.plugin_rpc.update_device_up(self.context,
                                                 device,
                                                 self.agent_id,
                                                 cfg.CONF.host)
            else:
                self.plugin_rpc.update_device_down(self.context,
                                                   device,
                                                   self.agent_id,
                                                   cfg.CONF.host)
        else:
            LOG.info(_LI("No device with MAC %s defined on agent."), device)
Example #8
def remove_empty_bridges():
    try:
        interface_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing physical_interface_mappings failed: %s."), e)
        sys.exit(1)
    LOG.info(_LI("Interface mappings: %s."), interface_mappings)

    try:
        bridge_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.bridge_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing bridge_mappings failed: %s."), e)
        sys.exit(1)
    LOG.info(_LI("Bridge mappings: %s."), bridge_mappings)

    lb_manager = linuxbridge_neutron_agent.LinuxBridgeManager(
        bridge_mappings, interface_mappings)

    bridge_names = lb_manager.get_deletable_bridges()
    for bridge_name in bridge_names:
        if lb_manager.get_tap_devices_count(bridge_name):
            continue

        try:
            lb_manager.delete_bridge(bridge_name)
            LOG.info(_LI("Linux bridge %s deleted"), bridge_name)
        except RuntimeError:
            LOG.exception(_LE("Linux bridge %s delete failed"), bridge_name)
    LOG.info(_LI("Linux bridge cleanup completed successfully"))
    def _run_openstack_l3_cmds(self, commands, server):
        """Execute/sends a CAPI (Command API) command to EOS.

        In this method, list of commands is appended with prefix and
        postfix commands - to make is understandble by EOS.

        :param commands : List of command to be executed on EOS.
        :param server: Server endpoint on the Arista switch to be configured
        """
        command_start = ['enable', 'configure']
        command_end = ['exit']
        full_command = command_start + commands + command_end

        LOG.info(_LI('Executing command on Arista EOS: %s'), full_command)

        try:
            # this returns array of return values for every command in
            # full_command list
            ret = server.runCmds(version=1, cmds=full_command)
            LOG.info(_LI('Results of execution on Arista EOS: %s'), ret)

        except Exception:
            msg = (_('Error occurred while trying to execute '
                     'commands %(cmd)s on EOS %(host)s') %
                   {'cmd': full_command, 'host': server})
            LOG.exception(msg)
            raise arista_exc.AristaServicePluginRpcError(msg=msg)
Example #10
    def delete_router(self, context, id):
        router_db = self._get_router(context, id)
        super(L3_HA_NAT_db_mixin, self).delete_router(context, id)

        if router_db.extra_attributes.ha:
            ha_network = self.get_ha_network(context,
                                             router_db.tenant_id)
            if ha_network:
                self._delete_vr_id_allocation(
                    context, ha_network, router_db.extra_attributes.ha_vr_id)
                self._delete_ha_interfaces(context, router_db.id)

                # always attempt to cleanup the network as the router is
                # deleted. the core plugin will stop us if its in use
                try:
                    self._delete_ha_network(context, ha_network)
                except (n_exc.NetworkNotFound,
                        orm.exc.ObjectDeletedError):
                    LOG.debug(
                        "HA network for tenant %s was already deleted.",
                        router_db.tenant_id)
                except sa.exc.InvalidRequestError:
                    LOG.info(_LI("HA network %s can not be deleted."),
                             ha_network.network_id)
                except n_exc.NetworkInUse:
                    # network is still in use, this is normal so we don't
                    # log anything
                    pass
                else:
                    LOG.info(_LI("HA network %(network)s was deleted as "
                                 "no HA routers are present in tenant "
                                 "%(tenant)s."),
                             {'network': ha_network.network_id,
                              'tenant': router_db.tenant_id})
Example #11
    def daemon_loop(self):
        LOG.info(_LI("LinuxBridge Agent RPC Daemon Started!"))
        device_info = None
        sync = True

        while True:
            start = time.time()

            if self.fullsync:
                sync = True
                self.fullsync = False

            if sync:
                LOG.info(_LI("Agent out of sync with plugin!"))

            device_info = self.scan_devices(previous=device_info, sync=sync)
            sync = False

            if (self._device_info_has_changes(device_info)
                    or self.sg_agent.firewall_refresh_needed()):
                LOG.debug("Agent loop found changes! %s", device_info)
                try:
                    sync = self.process_network_devices(device_info)
                except Exception:
                    LOG.exception(_LE("Error in agent loop. Devices info: %s"),
                                  device_info)
                    sync = True

            # sleep till end of polling interval
            elapsed = time.time() - start
            if elapsed < self.polling_interval:
                time.sleep(self.polling_interval - elapsed)
            else:
                LOG.debug("Loop iteration exceeded interval "
                          "(%(polling_interval)s vs. %(elapsed)s)!",
                          {'polling_interval': self.polling_interval,
                           'elapsed': elapsed})
Example #12
    def delete_subnet_precommit(self, context):
        LOG.info(_LI("APIC AIM MD deleting subnet: %s"), context.current)

        gateway_ip_mask = self._gateway_ip_mask(context.current)
        if gateway_ip_mask:
            session = context._plugin_context.session

            network_id = context.current['network_id']
            # REVISIT(rkukura): Should Ml2Plus extend SubnetContext
            # with network?
            network = (session.query(models_v2.Network).
                       filter_by(id=network_id).
                       one())

            tenant_id = network.tenant_id
            tenant_name = self.name_mapper.tenant(session, tenant_id)
            LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
                     {'id': tenant_id, 'apic_name': tenant_name})

            network_name = network.name
            bd_name = self.name_mapper.network(session, network_id,
                                               network_name)
            LOG.info(_LI("Mapped network_id %(id)s with name %(name)s to "
                         "%(apic_name)s"),
                     {'id': network_id, 'name': network_name,
                      'apic_name': bd_name})

            aim_ctx = aim_context.AimContext(session)

            subnet = aim_resource.Subnet(tenant_name=tenant_name,
                                         bd_name=bd_name,
                                         gw_ip_mask=gateway_ip_mask)
            self.aim.delete(aim_ctx, subnet)
Example #13
    def delete_network_precommit(self, context):
        LOG.info(_LI("APIC AIM MD deleting network: %s"), context.current)

        session = context._plugin_context.session

        tenant_id = context.current['tenant_id']
        tenant_name = self.name_mapper.tenant(session, tenant_id)
        LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
                 {'id': tenant_id, 'apic_name': tenant_name})

        id = context.current['id']
        bd_name = self.name_mapper.network(session, id)
        LOG.info(_LI("Mapped network_id %(id)s to %(apic_name)s"),
                 {'id': id, 'apic_name': bd_name})

        aim_ctx = aim_context.AimContext(session)

        epg = aim_resource.EndpointGroup(tenant_name=tenant_name,
                                         app_profile_name=AP_NAME,
                                         name=bd_name)
        self.aim.delete(aim_ctx, epg)

        bd = aim_resource.BridgeDomain(tenant_name=tenant_name,
                                       name=bd_name)
        self.aim.delete(aim_ctx, bd)

        self.name_mapper.delete_apic_name(session, id)
Example #14
    def delete_router(self, context, id):
        router_db = self._get_router(context, id)
        super(L3_HA_NAT_db_mixin, self).delete_router(context, id)

        if router_db.extra_attributes.ha:
            ha_network = self.get_ha_network(context, router_db.tenant_id)
            if ha_network:
                self._delete_vr_id_allocation(
                    context, ha_network, router_db.extra_attributes.ha_vr_id)
                self._delete_ha_interfaces(context, router_db.id)

                # In case that create HA router failed because of the failure
                # in HA network creation. So here put this deleting HA network
                # procedure under 'if ha_network' block.
                if not self._ha_routers_present(context, router_db.tenant_id):
                    try:
                        self._delete_ha_network(context, ha_network)
                    except (n_exc.NetworkNotFound, orm.exc.ObjectDeletedError):
                        LOG.debug("HA network for tenant %s was already deleted.", router_db.tenant_id)
                    except sa.exc.InvalidRequestError:
                        LOG.info(_LI("HA network %s can not be deleted."), ha_network.network_id)
                    except n_exc.NetworkInUse:
                        LOG.debug("HA network %s is still in use.", ha_network.network_id)
                    else:
                        LOG.info(_LI("HA network %(network)s was deleted as "
                                     "no HA routers are present in tenant "
                                     "%(tenant)s."),
                                 {'network': ha_network.network_id,
                                  'tenant': router_db.tenant_id})
Example #15
    def ensure_tenant(self, plugin_context, tenant_id):
        LOG.info(_LI("APIC AIM MD ensuring tenant_id: %s"), tenant_id)

        self.project_name_cache.ensure_project(tenant_id)

        # TODO(rkukura): Move the following to precommit methods so
        # AIM tenants and application profiles are created whenever
        # needed.
        session = plugin_context.session
        with session.begin(subtransactions=True):
            project_name = self.project_name_cache.get_project_name(tenant_id)
            tenant_name = self.name_mapper.tenant(session, tenant_id,
                                                  project_name)
            LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
                     {'id': tenant_id, 'apic_name': tenant_name})

            aim_ctx = aim_context.AimContext(session)

            tenant = aim_resource.Tenant(name=tenant_name)
            if not self.aim.get(aim_ctx, tenant):
                self.aim.create(aim_ctx, tenant)

            ap = aim_resource.ApplicationProfile(tenant_name=tenant_name,
                                                 name=AP_NAME)
            if not self.aim.get(aim_ctx, ap):
                self.aim.create(aim_ctx, ap)
Example #16
    def sync_state(self, networks=None):
        """Sync the local DHCP state with Neutron. If no networks are passed,
        or 'None' is one of the networks, sync all of the networks.
        """
        only_nets = set([] if (not networks or None in networks) else networks)
        LOG.info(_LI('Synchronizing state'))
        pool = eventlet.GreenPool(self.conf.num_sync_threads)
        known_network_ids = set(self.cache.get_network_ids())

        try:
            active_networks = self.plugin_rpc.get_active_networks_info()
            active_network_ids = set(network.id for network in active_networks)
            for deleted_id in known_network_ids - active_network_ids:
                try:
                    self.disable_dhcp_helper(deleted_id)
                except Exception as e:
                    self.schedule_resync(e, deleted_id)
                    LOG.exception(_LE('Unable to sync network state on '
                                      'deleted network %s'), deleted_id)

            for network in active_networks:
                if (not only_nets or  # specifically resync all
                        network.id not in known_network_ids or  # missing net
                        network.id in only_nets):  # specific network to sync
                    pool.spawn(self.safe_configure_dhcp_for_network, network)
            pool.waitall()
            LOG.info(_LI('Synchronizing state complete'))

        except Exception as e:
            if only_nets:
                for network_id in only_nets:
                    self.schedule_resync(e, network_id)
            else:
                self.schedule_resync(e)
            LOG.exception(_LE('Unable to sync network state.'))
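
Note: the heart of sync_state is the eventlet GreenPool fan-out, one green thread per
active network followed by waitall() to block until every DHCP configuration attempt
has finished. A minimal, self-contained sketch of that pattern, with a made-up worker
standing in for safe_configure_dhcp_for_network:

import eventlet

def configure_network(network_id):
    # stand-in worker; the real agent calls safe_configure_dhcp_for_network
    print("configuring DHCP for", network_id)

pool = eventlet.GreenPool(4)              # bounded concurrency, like num_sync_threads
for net_id in ("net-a", "net-b", "net-c"):
    pool.spawn(configure_network, net_id)  # each network handled on its own green thread
pool.waitall()                             # return only after every worker has finished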
Example #17
    def port_bound(self, port_id, net_uuid,
                   network_type, physical_network, segmentation_id, userid):
        LOG.info(_LI("Start to bind port port_id:%(port_id)s, "
                     "net_uuid:%(net_uuid)s, network_type: %(network_type)s, "
                     "physical_network: %(physical_network)s, "
                     "userid: %(userid)s, segmentation_id:%(seg_id)s"),
                 {'port_id': port_id, 'net_uuid': net_uuid,
                  'network_type': network_type,
                  'physical_network': physical_network,
                  'seg_id': segmentation_id,
                  'userid': userid})

        self._utils.grant_user(self._zhcp_node, physical_network, userid)
        vdev = self._utils.couple_nic_to_vswitch(physical_network, port_id,
                                                 self._zhcp_node, userid)
        self._utils.put_user_direct_online(self._zhcp_node,
                                           self._zhcp_userid)

        if network_type == p_const.TYPE_VLAN:
            LOG.info(_LI('Binding VLAN, VLAN ID: %(segmentation_id)s, '
                         'port_id: %(port_id)s'),
                     {'segmentation_id': segmentation_id,
                      'port_id': port_id})
            self._utils.set_vswitch_port_vlan_id(segmentation_id, port_id,
                                                 vdev, self._zhcp_node,
                                                 physical_network)
        else:
            LOG.info(_LI('Bind %s port done'), port_id)
Example #18
    def delete_router(self, context, id):
        router_db = self._get_router(context, id)
        super(L3_HA_NAT_db_mixin, self).delete_router(context, id)

        if router_db.extra_attributes.ha:
            ha_network = self.get_ha_network(context,
                                             router_db.tenant_id)
            if ha_network:
                self._delete_vr_id_allocation(
                    context, ha_network, router_db.extra_attributes.ha_vr_id)
                self._delete_ha_interfaces(context, router_db.id)
            try:
                if not self._ha_routers_present(context,
                                                router_db.tenant_id):
                    self._delete_ha_network(context, ha_network)
                    LOG.info(_LI("HA network %(network)s was deleted as "
                                 "no HA routers are present in tenant "
                                 "%(tenant)s."),
                             {'network': ha_network.network_id,
                              'tenant': router_db.tenant_id})
            except n_exc.NetworkNotFound:
                LOG.debug("HA network %s was already deleted.",
                          ha_network.network_id)
            except sa.exc.InvalidRequestError:
                LOG.info(_LI("HA network %s can not be deleted."),
                         ha_network.network_id)
Example #19
    def _get_dp(self):
        """Get (dp, ofp, ofpp) tuple for the switch.

        A convenient method for openflow message composers.
        """
        while True:
            if self._cached_dpid is None:
                dpid_str = self.get_datapath_id()
                LOG.info(_LI("Bridge %(br_name)s has datapath-ID %(dpid)s"),
                         {"br_name": self.br_name, "dpid": dpid_str})
                self._cached_dpid = int(dpid_str, 16)
            try:
                dp = self._get_dp_by_dpid(self._cached_dpid)
                return dp, dp.ofproto, dp.ofproto_parser
            except RuntimeError:
                with excutils.save_and_reraise_exception() as ctx:
                    # Retry if dpid has been changed.
                    # NOTE(yamamoto): Open vSwitch change its dpid on
                    # some events.
                    # REVISIT(yamamoto): Consider to set dpid statically.
                    old_dpid_str = format(self._cached_dpid, '0x')
                    new_dpid_str = self.get_datapath_id()
                    if new_dpid_str != old_dpid_str:
                        LOG.info(_LI("Bridge %(br_name)s changed its "
                                     "datapath-ID from %(old)s to %(new)s"), {
                            "br_name": self.br_name,
                            "old": old_dpid_str,
                            "new": new_dpid_str,
                        })
                        ctx.reraise = False
                    self._cached_dpid = int(new_dpid_str, 16)
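
Note: the retry above hinges on oslo.utils' save_and_reraise_exception context manager.
The caught exception is re-raised when the block exits unless ctx.reraise is set to
False, which lets the surrounding while loop try again with the refreshed datapath-ID.
A hedged, standalone sketch of that pattern with a made-up lookup function:

from oslo_utils import excutils

def lookup(attempt):
    # made-up stand-in for _get_dp_by_dpid(); fails on the first call only
    if attempt == 0:
        raise RuntimeError("stale dpid")
    return "datapath"

attempt = 0
while True:
    try:
        result = lookup(attempt)
        break
    except RuntimeError:
        with excutils.save_and_reraise_exception() as ctx:
            if attempt == 0:         # retry condition, e.g. the dpid changed
                ctx.reraise = False  # swallow the exception and loop again
        attempt += 1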
Example #20
 def _delete_port_group(self, pg_ref, name):
     remove_used_pg_try = 0
     while True:
         try:
             pg_delete_task = self.connection.invoke_api(
                 self.connection.vim,
                 'Destroy_Task',
                 pg_ref)
             self.connection.wait_for_task(pg_delete_task)
             LOG.info(_LI('Network %(name)s deleted.'), {'name': name})
             break
         except vmware_exceptions.VimException as e:
             if dvs_const.RESOURCE_IN_USE in e.message:
                 remove_used_pg_try += 1
                 if remove_used_pg_try > 3:
                     LOG.info(_LI('Network %(name)s was not deleted. Active'
                                  ' ports were found'), {'name': name})
                     break
                 else:
                     sleep(0.2)
             else:
                 raise exceptions.wrap_wmvare_vim_exception(e)
         except vmware_exceptions.VMwareDriverException as e:
             if dvs_const.DELETED_TEXT in e.message:
                 sleep(0.1)
             else:
                 raise
Example #21
    def _create_resource_instance(self, resource_name, plural_name):
        """Factory function for quota Resource.

        This routine returns a resource instance of the appropriate type
        according to system configuration.

        If QUOTAS.track_quota_usage is True, and there is a model mapping for
        the current resource, this function will return an instance of
        TrackedResource; otherwise an instance of CountableResource.
        """

        if (not cfg.CONF.QUOTAS.track_quota_usage or
            resource_name not in self._tracked_resource_mappings):
            LOG.info(_LI("Creating instance of CountableResource for "
                         "resource:%s"), resource_name)
            return resource.CountableResource(
                resource_name, resource._count_resource,
                'quota_%s' % resource_name)
        else:
            LOG.info(_LI("Creating instance of TrackedResource for "
                         "resource:%s"), resource_name)
            return resource.TrackedResource(
                resource_name,
                self._tracked_resource_mappings[resource_name],
                'quota_%s' % resource_name)
Example #22
def main():
    """Main method for cleaning up OVS bridges.

    The utility cleans up the integration bridges used by Neutron.
    """

    conf = setup_conf()
    conf()
    config.setup_logging()

    configuration_bridges = set([conf.ovs_integration_bridge,
                                 conf.external_network_bridge])
    ovs = ovs_lib.BaseOVS()
    ovs_bridges = set(ovs.get_bridges())
    available_configuration_bridges = configuration_bridges & ovs_bridges

    if conf.ovs_all_ports:
        bridges = ovs_bridges
    else:
        bridges = available_configuration_bridges

    # Collect existing ports created by Neutron on configuration bridges.
    # After deleting ports from OVS bridges, we cannot determine which
    # ports were created by Neutron, so port information is collected now.
    ports = collect_neutron_ports(available_configuration_bridges)

    for bridge in bridges:
        LOG.info(_LI("Cleaning bridge: %s"), bridge)
        ovs = ovs_lib.OVSBridge(bridge)
        ovs.delete_ports(all_ports=conf.ovs_all_ports)

    # Remove remaining ports created by Neutron (usually veth pair)
    delete_neutron_ports(ports)

    LOG.info(_LI("OVS cleanup completed successfully"))
Example #23
 def refresh_firewall(self, device_ids=None):
     LOG.info(_LI("Refresh firewall rules"))
     if not device_ids:
         device_ids = self.firewall.ports.keys()
         if not device_ids:
             LOG.info(_LI("No ports here to refresh firewall"))
             return
     self._apply_port_filter(device_ids, update_filter=True)
Example #24
 def safe_configure_dhcp_for_network(self, network):
     try:
         network_id = network.get('id')
         LOG.info(_LI('Starting network %s dhcp configuration'), network_id)
         self.configure_dhcp_for_network(network)
         LOG.info(_LI('Finished network %s dhcp configuration'), network_id)
     except (exceptions.NetworkNotFound, RuntimeError):
         LOG.warn(_LW('Network %s may have been deleted and its resources '
                      'may have already been disposed.'), network.id)
Example #25
def setup_logging():
    """Sets up the logging options for a log with supplied name."""
    product_name = "neutron"
    logging.setup(cfg.CONF, product_name)
    LOG.info(_LI("Logging enabled!"))
    LOG.info(_LI("%(prog)s version %(version)s"),
             {'prog': sys.argv[0],
              'version': version.version_info.release_string()})
    LOG.debug("command line: %s", " ".join(sys.argv))
Example #26
 def _parse_networks(self, entries):
     self.flat_networks = entries
     if '*' in self.flat_networks:
         LOG.info(_LI("Arbitrary flat physical_network names allowed"))
         self.flat_networks = None
     elif not self.flat_networks:
         LOG.info(_LI("Flat networks are disabled"))
     else:
         LOG.info(_LI("Allowable flat physical_network names: %s"),
                  self.flat_networks)
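
Note: after parsing, self.flat_networks is either None (a '*' entry means any
physical_network name is allowed), an empty collection (flat networks disabled), or an
allowlist of names. A hypothetical helper showing how those three states would
typically be checked later (this function is illustrative, not part of the driver):

def is_flat_network_allowed(flat_networks, physical_network):
    # None means '*' was configured: every name is acceptable
    if flat_networks is None:
        return True
    # an empty list disables flat networks entirely; otherwise only
    # names on the allowlist are accepted
    return physical_network in flat_networks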
Example #27
    def __init__(self):
        # Mapping from type name to DriverManager
        self.drivers = {}

        LOG.info(_LI("Configured type driver names: %s"), cfg.CONF.ml2.type_drivers)
        super(TypeManager, self).__init__("neutron.ml2.type_drivers", cfg.CONF.ml2.type_drivers, invoke_on_load=True)
        LOG.info(_LI("Loaded type driver names: %s"), self.names())
        self._register_types()
        self._check_tenant_network_types(cfg.CONF.ml2.tenant_network_types)
        self._check_external_network_type(cfg.CONF.ml2.external_network_type)
Example #28
def eventlet_rpc_server():
    LOG.info(_LI("Eventlet based AMQP RPC server starting..."))

    try:
        rpc_workers_launcher = service.start_rpc_workers()
    except NotImplementedError:
        LOG.info(_LI("RPC was already started in parent process by "
                     "plugin."))
    else:
        rpc_workers_launcher.wait()
Example #29
    def _treat_devices_added(self, devices):
        for device in devices:
            LOG.info(_LI("Adding port %s") % device)
            try:
                details = self.plugin_rpc.get_device_details(self.context,
                                                             device,
                                                             self.agent_id)
            except Exception:
                LOG.info(_LI("Unable to get port details for %s:"), device)
                continue

            try:
                if 'port_id' in details:
                    LOG.info(_LI("Port %(device)s updated. "
                               "Details: %(details)s"),
                             {'device': device, 'details': details})
                    (node, userid) = self._treat_vif_port(
                                     details['port_id'],
                                     details['network_id'],
                                     details['network_type'],
                                     details['physical_network'],
                                     details['segmentation_id'],
                                     details['admin_state_up'])
                    # add device done, keep port map info
                    self._port_map[device] = {}
                    self._port_map[device]['userid'] = userid
                    self._port_map[device]['nodename'] = node
                    self._port_map[device]['vswitch'] = details[
                                                        'physical_network']
                    self._port_map[device]['vlan_id'] = details[
                                                        'segmentation_id']

                    # no rollback if this fails
                    self._utils.update_xcat_switch(details['port_id'],
                                     details['physical_network'],
                                     details['segmentation_id'])
                    if details.get('admin_state_up'):
                        LOG.info(_LI("Setting status for %s to UP"), device)
                        self.plugin_rpc.update_device_up(
                            self.context, device, self.agent_id, self._host)
                    else:
                        LOG.info(_LI("Setting status for %s to DOWN"), device)
                        self.plugin_rpc.update_device_down(
                            self.context, device, self.agent_id, self._host)

                else:
                    LOG.warning(_LW("Device %(device)s not defined on "
                                    "Neutron server, The output detail is "
                                    "%(details)s"),
                                {'device': device, 'details': details})
                    continue
            except Exception as e:
                LOG.exception(_LE("Can not add device %(device)s: %(msg)s"),
                              {'device': device, 'msg': e})
                continue
Example #30
    def __init__(self):
        # Mapping from provisioning driver name to DriverManager
        self.drivers = {}

        LOG.info(_LI("Configured provisioning driver names: %s"),
                 cfg.CONF.ml2_hpe.prov_driver)
        super(ProvisioningManager, self).__init__('bnp.prov_driver',
                                                  cfg.CONF.ml2_hpe.prov_driver,
                                                  invoke_on_load=True)
        LOG.info(_LI("Loaded provisioning driver names: %s"), self.names())
        self._register_provisioning()
Example #31
 def agent_updated(self, context, payload):
     """Handle the agent_updated notification event."""
     self.schedule_resync(
         _("Agent updated: %(payload)s") % {"payload": payload})
     LOG.info(_LI("agent_updated by server side %s!"), payload)
Example #32
 def after_start(self):
     LOG.info(_LI("DHCP agent started"))
Example #33
 def after_start(self):
     LOG.info(_LI("BGP dynamic routing agent started"))
Example #34
    def __init__(self):

        LOG.info(_LI("Configured ODL username: %s"),
                 cfg.CONF.odl_driver.odl_username)
        LOG.info(_LI("Configured ODL password: %s"),
                 cfg.CONF.odl_driver.odl_password)
        LOG.info(_LI("Configured ODL host: %s"),
                 cfg.CONF.odl_driver.odl_host)
        LOG.info(_LI("Configured ODL port: %s"),
                 cfg.CONF.odl_driver.odl_port)

        self._username = cfg.CONF.odl_driver.odl_username
        self._password = cfg.CONF.odl_driver.odl_password
        self._host = cfg.CONF.odl_driver.odl_host
        self._port = cfg.CONF.odl_driver.odl_port
        self._headers = {
            'Content-type': 'application/yang.data+json',
            'Accept': 'application/yang.data+json',
        }

        self._base_url = (
            "http://%(host)s:%(port)s/restconf" %
            {'host': self._host, 'port': self._port}
        )
        self._reg_ep_url = (
            self._base_url +
            '/operations/endpoint:register-endpoint'
        )
        self._unreg_ep_url = (
            self._base_url +
            '/operations/endpoint:unregister-endpoint'
        )
        self._policy_url = (
            self._base_url +
            '/config/policy:tenants/policy:tenant/%(tenant_id)s'
        )
        self._action_url = (
            self._policy_url +
            '/subject-feature-instances/action-instance/%(action)s'
        )
        self._classifier_url = (
            self._policy_url +
            '/subject-feature-instances/classifier-instance/%(classifier)s'
        )
        self._l3ctx_url = (
            self._policy_url +
            '/l3-context/%(l3ctx)s'
        )
        self._l2bd_url = (
            self._policy_url +
            '/l2-bridge-domain/%(l2bd)s'
        )
        self._l2fd_url = (
            self._policy_url +
            '/l2-flood-domain/%(l2fd)s'
        )
        self._epg_url = (
            self._policy_url +
            '/policy:endpoint-group/%(epg)s'
        )
        self._subnet_url = (
            self._policy_url +
            '/subnet/%(subnet)s'
        )
        self._contract_url = (
            self._policy_url +
            '/policy:contract/%(contract)s'
        )
Example #35
    def treat_devices_added_updated(self, devices):
        try:
            devices_details_list = self.plugin_rpc.get_devices_details_list(
                self.context, devices, self.agent_id)
        except Exception:
            LOG.exception(_LE("Unable to get port details for %s"), devices)
            # resync is needed
            return True

        for device_details in devices_details_list:
            device = device_details['device']
            LOG.debug("Port %s added", device)

            if 'port_id' in device_details:
                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
                         {'device': device, 'details': device_details})
                if self.prevent_arp_spoofing:
                    port = self.br_mgr.get_tap_device_name(
                        device_details['port_id'])
                    arp_protect.setup_arp_spoofing_protection(port,
                                                              device_details)
                # create the networking for the port
                network_type = device_details.get('network_type')
                segmentation_id = device_details.get('segmentation_id')
                tap_in_bridge = self.br_mgr.add_interface(
                    device_details['network_id'], network_type,
                    device_details['physical_network'], segmentation_id,
                    device_details['port_id'], device_details['device_owner'])
                # REVISIT(scheuran): Changed the way how ports admin_state_up
                # is implemented.
                #
                # Old lb implementation:
                # - admin_state_up: ensure that tap is plugged into bridge
                # - admin_state_down: remove tap from bridge
                # New lb implementation:
                # - admin_state_up: set tap device state to up
                # - admin_state_down: set tap device state to down
                #
                # However both approaches could result in races with
                # nova/libvirt and therefore to an invalid system state in the
                # scenario, where an instance is booted with a port configured
                # with admin_state_up = False:
                #
                # Libvirt does the following actions in exactly
                # this order (see libvirt virnetdevtap.c)
                #     1) Create the tap device, set its MAC and MTU
                #     2) Plug the tap into the bridge
                #     3) Set the tap online
                #
                # Old lb implementation:
                #   A race could occur, if the lb agent removes the tap device
                #   right after step 1). Then libvirt will add it to the bridge
                #   again in step 2).
                # New lb implementation:
                #   The race could occur if the lb-agent sets the taps device
                #   state to down right after step 2). In step 3) libvirt
                #   might set it to up again.
                #
                # This is not an issue if an instance is booted with a port
                # configured with admin_state_up = True. Libvirt would just
                # set the tap device up again.
                #
                # This refactoring is recommended for the following reasons:
                # 1) An existing race with libvirt caused by the behavior of
                #    the old implementation. See Bug #1312016
                # 2) The new code is much more readable
                self._ensure_port_admin_state(device_details['port_id'],
                                              device_details['admin_state_up'])
                # update plugin about port status if admin_state is up
                if device_details['admin_state_up']:
                    if tap_in_bridge:
                        self.plugin_rpc.update_device_up(self.context,
                                                         device,
                                                         self.agent_id,
                                                         cfg.CONF.host)
                    else:
                        self.plugin_rpc.update_device_down(self.context,
                                                           device,
                                                           self.agent_id,
                                                           cfg.CONF.host)
            else:
                LOG.info(_LI("Device %s not defined on plugin"), device)
        return False
Example #36
 def agent_updated(self, context, payload):
     """Handle the agent_updated notification event."""
     self.fullsync = True
     LOG.info(_LI("agent_updated by server side %s!"), payload)
Example #37
 def initialize(self):
     self._sync_vlan_allocations()
     LOG.info(_LI("VlanTypeDriver initialization complete"))
Example #38
    def _process_device_if_exists(self, device_details):
        # ignore exceptions from devices that disappear because they will
        # be handled as removed in the next iteration
        device = device_details['device']
        with self._ignore_missing_device_exceptions(device):
            LOG.debug("Port %s added", device)

            if 'port_id' in device_details:
                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
                         {'device': device, 'details': device_details})
                if self.prevent_arp_spoofing:
                    self.mgr.setup_arp_spoofing_protection(device,
                                                           device_details)

                segment = amb.NetworkSegment(
                    device_details.get('network_type'),
                    device_details['physical_network'],
                    device_details.get('segmentation_id')
                )
                network_id = device_details['network_id']
                self.rpc_callbacks.add_network(network_id, segment)
                interface_plugged = self.mgr.plug_interface(
                    network_id, segment,
                    device, device_details['device_owner'])
                # REVISIT(scheuran): Changed the way how ports admin_state_up
                # is implemented.
                #
                # Old lb implementation:
                # - admin_state_up: ensure that tap is plugged into bridge
                # - admin_state_down: remove tap from bridge
                # New lb implementation:
                # - admin_state_up: set tap device state to up
                # - admin_state_down: set tap device state to down
                #
                # However both approaches could result in races with
                # nova/libvirt and therefore to an invalid system state in the
                # scenario, where an instance is booted with a port configured
                # with admin_state_up = False:
                #
                # Libvirt does the following actions in exactly
                # this order (see libvirt virnetdevtap.c)
                #     1) Create the tap device, set its MAC and MTU
                #     2) Plug the tap into the bridge
                #     3) Set the tap online
                #
                # Old lb implementation:
                #   A race could occur, if the lb agent removes the tap device
                #   right after step 1). Then libvirt will add it to the bridge
                #   again in step 2).
                # New lb implementation:
                #   The race could occur if the lb-agent sets the taps device
                #   state to down right after step 2). In step 3) libvirt
                #   might set it to up again.
                #
                # This is not an issue if an instance is booted with a port
                # configured with admin_state_up = True. Libvirt would just
                # set the tap device up again.
                #
                # This refactoring is recommended for the following reasons:
                # 1) An existing race with libvirt caused by the behavior of
                #    the old implementation. See Bug #1312016
                # 2) The new code is much more readable
                if interface_plugged:
                    self.mgr.ensure_port_admin_state(
                        device, device_details['admin_state_up'])
                # update plugin about port status if admin_state is up
                if device_details['admin_state_up']:
                    if interface_plugged:
                        self.plugin_rpc.update_device_up(self.context,
                                                         device,
                                                         self.agent_id,
                                                         cfg.CONF.host)
                    else:
                        self.plugin_rpc.update_device_down(self.context,
                                                           device,
                                                           self.agent_id,
                                                           cfg.CONF.host)
                self._update_network_ports(device_details['network_id'],
                                           device_details['port_id'],
                                           device_details['device'])
                self.ext_manager.handle_port(self.context, device_details)
                registry.notify(local_resources.PORT_DEVICE,
                                events.AFTER_UPDATE, self,
                                context=self.context,
                                device_details=device_details)
            else:
                LOG.info(_LI("Device %s not defined on plugin"), device)
Example #39
    def resource(request):
        route_args = request.environ.get('wsgiorg.routing_args')
        if route_args:
            args = route_args[1].copy()
        else:
            args = {}

        # NOTE(jkoelker) by now the controller is already found, remove
        #                it from the args if it is in the matchdict
        args.pop('controller', None)
        fmt = args.pop('format', None)
        action = args.pop('action', None)
        content_type = format_types.get(fmt,
                                        request.best_match_content_type())
        language = request.best_match_language()
        deserializer = deserializers.get(content_type)
        serializer = serializers.get(content_type)

        try:
            if request.body:
                args['body'] = deserializer.deserialize(request.body)['body']

            # Routes library is dumb and cuts off everything after last dot (.)
            # as format. At the same time, it doesn't enforce format suffix,
            # which combined makes it impossible to pass a 'id' with dots
            # included (the last section after the last dot is lost). This is
            # important for some API extensions like tags where the id is
            # really a tag name that can contain special characters.
            #
            # To work around the Routes behaviour, we will attach the suffix
            # back to id if it's not one of supported formats (atm json only).
            # This of course won't work for the corner case of a tag name that
            # actually ends with '.json', but there seems to be no better way
            # to tackle it without breaking API backwards compatibility.
            if fmt is not None and fmt not in format_types:
                args['id'] = '.'.join([args['id'], fmt])

            method = getattr(controller, action)
            result = method(request=request, **args)
        except Exception as e:
            mapped_exc = api_common.convert_exception_to_http_exc(e, faults,
                                                                  language)
            if hasattr(mapped_exc, 'code') and 400 <= mapped_exc.code < 500:
                LOG.info(_LI('%(action)s failed (client error): %(exc)s'),
                         {'action': action, 'exc': mapped_exc})
            else:
                LOG.exception(
                    _LE('%(action)s failed: %(details)s'),
                    {
                        'action': action,
                        'details': utils.extract_exc_details(e),
                    }
                )
            raise mapped_exc

        status = action_status.get(action, 200)
        body = serializer.serialize(result)
        # NOTE(jkoelker) Comply with RFC2616 section 9.7
        if status == 204:
            content_type = ''
            body = None

        return webob.Response(request=request, status=status,
                              content_type=content_type,
                              body=body)
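
Note: the Routes workaround in the try block is easiest to see in isolation: whatever
Routes split off after the last dot is glued back onto the id unless it names a
supported serialization format. A small hedged sketch of just that step, assuming
format_types maps 'json' to its MIME type as in the snippet above:

format_types = {'json': 'application/json'}

def restore_id(route_id, fmt):
    # Routes parses 'my.tag.name' as id='my.tag', format='name';
    # reattach the suffix unless it is a real format like 'json'
    if fmt is not None and fmt not in format_types:
        return '.'.join([route_id, fmt])
    return route_id

# restore_id('my.tag', 'name')    -> 'my.tag.name'
# restore_id('port-uuid', 'json') -> 'port-uuid'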
Example #40
 def after_start(self):
     self.run()
     LOG.info(_LI("BGP Dynamic Routing agent started"))
Example #41
def disable_dvr_extension_by_config(aliases):
    if not cfg.CONF.enable_dvr:
        LOG.info(_LI('Disabled DVR extension.'))
        if 'dvr' in aliases:
            aliases.remove('dvr')
Example #42
 def initialize(self):
     # Initialize each driver in the list.
     for driver in self.ordered_ext_drivers:
         LOG.info(_LI("Initializing extension driver '%s'"), driver.name)
         driver.obj.initialize()
Example #43
 def initialize(self):
     for driver in self.ordered_mech_drivers:
         LOG.info(_LI("Initializing mechanism driver '%s'"), driver.name)
         driver.obj.initialize()
Example #44
 def initialize(self):
     LOG.info(_LI("ML2 FlatTypeDriver initialization complete"))
Example #45
 def initialize(self):
     for network_type, driver in six.iteritems(self.drivers):
         LOG.info(_LI("Initializing driver for type '%s'"), network_type)
         driver.obj.initialize()
Example #46
 def provisioning_driver(self, provisioning_type):
     """provisioning driver instance."""
     driver = self.drivers.get(provisioning_type)
     LOG.info(_LI("Loaded provisioning driver type: %s"), driver.obj)
     return driver
Example #47
 def stop(self, graceful=True):
     LOG.info(_LI("Stopping linuxbridge agent."))
     if graceful and self.quitting_rpc_timeout:
         self.set_rpc_timeout(self.quitting_rpc_timeout)
     super(LinuxBridgeNeutronAgentRPC, self).stop(graceful)
Example #48
 def __init__(self):
     LOG.info(_LI("APIC AIM L3 Plugin __init__"))
     extensions.append_api_extensions_path(extensions_pkg.__path__)
     self._mechanism_driver = None
     super(ApicL3Plugin, self).__init__()
Example #49
 def after_start(self):
     self.run()
     LOG.info(_LI("DHCP agent started"))
Example #50
 def agent_updated(self, context, payload):
     """Handle the agent_updated notification event."""
     self.schedule_full_resync(reason=_("BgpDrAgent updated: %s") % payload)
     LOG.info(_LI("agent_updated by server side %s!"), payload)
Example #51
def disable_security_group_extension_by_config(aliases):
    if not is_firewall_enabled():
        LOG.info(_LI('Disabled security-group extension.'))
        _disable_extension('security-group', aliases)
        LOG.info(_LI('Disabled allowed-address-pairs extension.'))
        _disable_extension('allowed-address-pairs', aliases)
Example #52
    def __init__(self,
                 plugin,
                 collection,
                 resource,
                 attr_info,
                 allow_bulk=False,
                 member_actions=None,
                 parent=None,
                 allow_pagination=False,
                 allow_sorting=False):
        if member_actions is None:
            member_actions = []
        self._plugin = plugin
        self._collection = collection.replace('-', '_')
        self._resource = resource.replace('-', '_')
        self._attr_info = attr_info
        self._allow_bulk = allow_bulk
        self._allow_pagination = allow_pagination
        self._allow_sorting = allow_sorting
        self._native_bulk = self._is_native_bulk_supported()
        self._native_pagination = self._is_native_pagination_supported()
        self._native_sorting = self._is_native_sorting_supported()
        self._policy_attrs = [
            name for (name, info) in self._attr_info.items()
            if info.get('required_by_policy')
        ]
        self._notifier = n_rpc.get_notifier('network')
        # use plugin's dhcp notifier, if this is already instantiated
        agent_notifiers = getattr(plugin, 'agent_notifiers', {})
        self._dhcp_agent_notifier = (
            agent_notifiers.get(constants.AGENT_TYPE_DHCP)
            or dhcp_rpc_agent_api.DhcpAgentNotifyAPI())
        if cfg.CONF.notify_nova_on_port_data_changes:
            from neutron.notifiers import nova
            self._nova_notifier = nova.Notifier()
        self._member_actions = member_actions
        self._primary_key = self._get_primary_key()
        if self._allow_pagination and self._native_pagination:
            # Native pagination need native sorting support
            if not self._native_sorting:
                raise exceptions.Invalid(
                    _("Native pagination depend on native sorting"))
            if not self._allow_sorting:
                LOG.info(
                    _LI("Allow sorting is enabled because native "
                        "pagination requires native sorting"))
                self._allow_sorting = True

        if parent:
            self._parent_id_name = '%s_id' % parent['member_name']
            parent_part = '_%s' % parent['member_name']
        else:
            self._parent_id_name = None
            parent_part = ''
        self._plugin_handlers = {
            self.LIST: 'get%s_%s' % (parent_part, self._collection),
            self.SHOW: 'get%s_%s' % (parent_part, self._resource)
        }
        for action in [self.CREATE, self.UPDATE, self.DELETE]:
            self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
                                                         self._resource)
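
Note: the plugin handler names built at the end of __init__ are plain string
compositions, so a worked illustration makes the mapping concrete. A hedged sketch with
illustrative resource names (the real controller keys the dict by its LIST/SHOW/CREATE/
UPDATE/DELETE constants):

def build_plugin_handlers(collection, resource, parent=None):
    # mirrors the string composition above; 'get' backs both list and show
    parent_part = '_%s' % parent['member_name'] if parent else ''
    handlers = {'list': 'get%s_%s' % (parent_part, collection),
                'show': 'get%s_%s' % (parent_part, resource)}
    for action in ('create', 'update', 'delete'):
        handlers[action] = '%s%s_%s' % (action, parent_part, resource)
    return handlers

# build_plugin_handlers('ports', 'port')
#   -> {'list': 'get_ports', 'show': 'get_port', 'create': 'create_port', ...}
# build_plugin_handlers('ports', 'port', parent={'member_name': 'router'})
#   -> {'list': 'get_router_ports', 'show': 'get_router_port',
#       'create': 'create_router_port', ...}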
Example #53
 def security_groups_rule_updated(self, security_groups):
     LOG.info(_LI("Security group " "rule updated %r"), security_groups)
     self._security_group_updated(security_groups, 'security_groups',
                                  'sg_rule')
Example #54
 def security_groups_member_updated(self, security_groups):
     LOG.info(_LI("Security group " "member updated %r"), security_groups)
     self._security_group_updated(security_groups,
                                  'security_group_source_groups',
                                  'sg_member')
Example #55
 def initialize(self):
     LOG.info(_LI("PortSecurityExtensionDriver initialization complete"))
Example #56
    def tunnel_sync(self, rpc_context, **kwargs):
        """Update new tunnel.

        Updates the database with the tunnel IP. All listening agents will also
        be notified about the new tunnel IP.
        """
        tunnel_ip = kwargs.get('tunnel_ip')
        if not tunnel_ip:
            msg = _("Tunnel IP value needed by the ML2 plugin")
            raise exc.InvalidInput(error_message=msg)

        host = kwargs.get('host')
        version = netaddr.IPAddress(tunnel_ip).version
        if version != cfg.CONF.ml2.overlay_ip_version:
            msg = (_("Tunnel IP version does not match ML2 "
                     "overlay_ip_version, host: %(host)s, tunnel_ip: %(ip)s"),
                   {
                       'host': host,
                       'ip': tunnel_ip
                   })
            raise exc.InvalidInput(error_message=msg)

        tunnel_type = kwargs.get('tunnel_type')
        if not tunnel_type:
            msg = _("Network type value needed by the ML2 plugin")
            raise exc.InvalidInput(error_message=msg)

        driver = self._type_manager.drivers.get(tunnel_type)
        if driver:
            # The given conditional statements will verify the following
            # things:
            # 1. If host is not passed from an agent, it is a legacy mode.
            # 2. If passed host and tunnel_ip are not found in the DB,
            #    it is a new endpoint.
            # 3. If host is passed from an agent and it is not found in DB
            #    but the passed tunnel_ip is found, delete the endpoint
            #    from DB and add the endpoint with (tunnel_ip, host),
            #    it is an upgrade case.
            # 4. If passed host is found in DB and passed tunnel ip is not
            #    found, delete the endpoint belonging to that host and
            #    add endpoint with latest (tunnel_ip, host), it is a case
            #    where local_ip of an agent got changed.
            # 5. If the passed host had another ip in the DB the host-id has
            #    roamed to a different IP then delete any reference to the new
            #    local_ip or the host id. Don't notify tunnel_delete for the
            #    old IP since that one could have been taken by a different
            #    agent host-id (neutron-ovs-cleanup should be used to clean up
            #    the stale endpoints).
            #    Finally create a new endpoint for the (tunnel_ip, host).
            if host:
                host_endpoint = driver.obj.get_endpoint_by_host(host)
                ip_endpoint = driver.obj.get_endpoint_by_ip(tunnel_ip)

                if (ip_endpoint and ip_endpoint.host is None
                        and host_endpoint is None):
                    driver.obj.delete_endpoint(ip_endpoint.ip_address)
                elif (ip_endpoint and ip_endpoint.host != host):
                    LOG.info(
                        _LI("Tunnel IP %(ip)s was used by host %(host)s and "
                            "will be assigned to %(new_host)s"), {
                                'ip': ip_endpoint.ip_address,
                                'host': ip_endpoint.host,
                                'new_host': host
                            })
                    driver.obj.delete_endpoint_by_host_or_ip(
                        host, ip_endpoint.ip_address)
                elif (host_endpoint and host_endpoint.ip_address != tunnel_ip):
                    # Notify all other listening agents to delete stale tunnels
                    self._notifier.tunnel_delete(rpc_context,
                                                 host_endpoint.ip_address,
                                                 tunnel_type)
                    driver.obj.delete_endpoint(host_endpoint.ip_address)

            tunnel = driver.obj.add_endpoint(tunnel_ip, host)
            tunnels = driver.obj.get_endpoints()
            entry = {'tunnels': tunnels}
            # Notify all other listening agents
            self._notifier.tunnel_update(rpc_context, tunnel.ip_address,
                                         tunnel_type)
            # Return the list of tunnels IP's to the agent
            return entry
        else:
            msg = _("Network type value '%s' not supported") % tunnel_type
            raise exc.InvalidInput(error_message=msg)
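
Note: the five numbered cases in the comment block boil down to a decision over
(host, existing host endpoint, existing ip endpoint). A hedged restatement of that
decision as a pure function, with illustrative return strings rather than the actual
driver calls:

def classify_tunnel_sync(host, host_endpoint, ip_endpoint, tunnel_ip):
    if not host:
        return 'legacy agent: just add/refresh the bare tunnel_ip'
    if ip_endpoint and ip_endpoint.host is None and host_endpoint is None:
        return 'upgrade: drop the host-less ip endpoint, re-add with host'
    if ip_endpoint and ip_endpoint.host != host:
        return 'ip reused by another host: purge stale host/ip references'
    if host_endpoint and host_endpoint.ip_address != tunnel_ip:
        return 'local_ip changed: tunnel_delete the old ip, then re-add'
    return 'new or unchanged endpoint: add (tunnel_ip, host)'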
Example #57
 def initialize(self):
     LOG.info(_LI("DNSExtensionDriverML2 initialization complete"))
Example #58
 def prepare_devices_filter(self, device_ids):
     if not device_ids:
         return
     LOG.info(_LI("Preparing filters for devices %s"), device_ids)
     self._apply_port_filter(device_ids)
Example #59
 def stop(self, graceful=True):
     LOG.info(_LI("Stopping %s agent."), self.agent_type)
     if graceful and self.quitting_rpc_timeout:
         self.set_rpc_timeout(self.quitting_rpc_timeout)
     super(CommonAgentLoop, self).stop(graceful)
Example #60
 def __init__(self):
     LOG.info(_LI("ML2 LocalTypeDriver initialization complete"))