Example #1
    def _execute(cls, *args):
        """Run received command and return the output."""
        command = [CONF.virtualbox.vboxmanage_cmd, "--nologo"]
        command.extend(args)
        LOG.debug("Execute: %s", command)
        stdout, stderr = None, None
        for _ in range(CONF.virtualbox.retry_count):
            try:
                process = subprocess.Popen(
                    command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                    universal_newlines=True)
            except subprocess.CalledProcessError as exc:
                stderr = exc.output
            else:
                stdout, stderr = process.communicate()

            if stderr and constants.VBOX_E_ACCESSDENIED in stderr:
                LOG.warning(_LW("Something went wrong, trying again."))
                time.sleep(CONF.virtualbox.retry_interval)
                continue

            break
        else:
            LOG.warning(_LW("Failed to process command."))

        return (stdout, stderr)
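
The retry loop above leans on Python's for/else: the else branch runs only when the loop finishes without a break, which here means every attempt hit the access-denied error. Below is a minimal standalone sketch of that pattern; run_with_retries and operation are invented names for illustration, not part of the driver above.

def run_with_retries(operation, retries=3):
    """Call `operation` until it reports success or retries run out."""
    result = error = None
    for _ in range(retries):
        result, error = operation()
        if error:
            # Transient failure: move on to the next attempt.
            continue
        break
    else:
        # Reached only when the loop was never broken out of,
        # i.e. every attempt ended in `continue`.
        print("all %d attempts failed" % retries)
    return result, error
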
Example #2
    def _inspect_instance(self, instance_name):
        """Get the network information from an instance."""
        network = {}
        try:
            instance_info = self._vbox.show_vm_info(instance_name)
        except exception.InstanceNotFound:
            LOG.warning(_LW("Failed to get specification for `%(instance)s`"),
                        {"instance": instance_name})
            return

        description = instance_info.get(constants.VM_DESCRIPTION)
        if not self._process_description(description):
            LOG.warning(_LW("Invalid description for `%(instance)s`: "
                            "%(description)s"),
                        {"instance": instance_name,
                         "description": description})
            return

        for field, value in instance_info.items():
            if field[:-1] in constants.NETWORK_FIELDS:
                network.setdefault(field[-1], {})[field[:-1]] = value

        for nic_index in network:
            mac_address = network[nic_index].get(constants.MAC_ADDRESS)
            device_id = self._device_map.get(mac_address)
            nic_mode = network[nic_index].get(constants.NIC_MODE)

            if device_id and nic_mode != constants.NIC_MODE_NONE:
                self._nic[device_id] = network[nic_index]
                self._nic[device_id]["index"] = nic_index
                self._nic[device_id]["instance"] = instance_name
                self._nic[device_id]["state"] = instace_info.get(
                    constants.VM_STATE)
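
The setdefault loop above groups flat VBoxManage keys such as macaddress1 or nic2 into one dictionary per NIC index: the trailing character is read as the index and the rest as the field name. A small self-contained rerun of that grouping with made-up data:

info = {"macaddress1": "080027AABBCC", "nic1": "nat",
        "macaddress2": "080027DDEEFF", "nic2": "hostonly"}
NETWORK_FIELDS = ("macaddress", "nic")

network = {}
for field, value in info.items():
    if field[:-1] in NETWORK_FIELDS:
        # The last character is the NIC index, the rest is the field name.
        network.setdefault(field[-1], {})[field[:-1]] = value

# network == {"1": {"macaddress": "080027AABBCC", "nic": "nat"},
#             "2": {"macaddress": "080027DDEEFF", "nic": "hostonly"}}
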
Example #3
    def _sync_ports(self, ctx):
        LOG.debug("OVN-NB Sync ports started")

        lports = self.ovn_api.get_all_logical_ports_ids()

        for port in self.core_plugin.get_ports(ctx):
            try:
                if self.mode == SYNC_MODE_REPAIR:
                    self.core_plugin.create_port_in_ovn(port)
                res = lports.pop(port['id'], None)
                if self.mode == SYNC_MODE_LOG:
                    if res is None:
                        LOG.warn(_LW("Port found in Neutron but not in OVN"
                                     "DB, port_id=%s"),
                                 port['id'])

            except RuntimeError:
                LOG.warn(_LW("Create port failed for"
                             " port %s"), port['id'])

        # Only delete logical port if it was previously created by neutron
        with self.ovn_api.transaction() as txn:
            for lport, ext_ids in lports.items():
                if ovn_const.OVN_PORT_NAME_EXT_ID_KEY in ext_ids:
                    if self.mode == SYNC_MODE_REPAIR:
                        txn.add(self.ovn_api.delete_lport(lport))
                    if self.mode == SYNC_MODE_LOG:
                        LOG.warn(_LW("Port found in OVN but not in Neutron,"
                                     " port_name=%s"),
                                 ext_ids[ovn_const.OVN_PORT_NAME_EXT_ID_KEY])

        LOG.debug("OVN-NB Sync ports finished")
Example #4
    def _get_candidates(self, plugin, context, sync_router):
        """Return L3 agents where a router could be scheduled."""
        with context.session.begin(subtransactions=True):
            # Allow the router to be hosted by just one enabled L3 agent;
            # whether that agent is currently active is only a timing issue,
            # since a non-active L3 agent can become active again at any time.
            current_l3_agents = plugin.get_l3_agents_hosting_routers(
                context, [sync_router['id']], admin_state_up=True)
            is_router_distributed = sync_router.get('distributed', False)
            if current_l3_agents and not is_router_distributed:
                LOG.debug('Router %(router_id)s has already been hosted'
                          ' by L3 agent %(agent_id)s',
                          {'router_id': sync_router['id'],
                           'agent_id': current_l3_agents[0]['id']})
                return []

            active_l3_agents = plugin.get_l3_agents(context, active=True)
            if not active_l3_agents:
                LOG.warn(_LW('No active L3 agents'))
                return []
            potential_candidates = list(
                set(active_l3_agents) - set(current_l3_agents))
            new_l3agents = []
            if potential_candidates:
                new_l3agents = plugin.get_l3_agent_candidates(
                    context, sync_router, potential_candidates)
                if not new_l3agents:
                    LOG.warn(_LW('No L3 agents can host the router %s'),
                             sync_router['id'])
            return new_l3agents
Example #5
 def _populate_policy_profiles(self):
     """Populate all the policy profiles from VSM."""
     hosts = self.n1kvclient.get_vsm_hosts()
     for vsm_ip in hosts:
         try:
             policy_profiles = self.n1kvclient.list_port_profiles(vsm_ip)
             vsm_profiles = {}
             plugin_profiles_set = set()
             # Fetch policy profiles from VSM
             for profile_name in policy_profiles:
                 profile_id = (policy_profiles[profile_name]
                               [n1kv_const.PROPERTIES][n1kv_const.ID])
                 vsm_profiles[profile_id] = profile_name
             # Fetch policy profiles previously populated
             for profile in self._get_policy_profiles_by_host(vsm_ip):
                 plugin_profiles_set.add(profile.id)
             vsm_profiles_set = set(vsm_profiles)
             # Update database if the profile sets differ.
             if vsm_profiles_set.symmetric_difference(plugin_profiles_set):
                 # Add new profiles to database if they were created in VSM
                 for pid in vsm_profiles_set.difference(
                                             plugin_profiles_set):
                     self._add_policy_profile(pid, vsm_profiles[pid],
                                              vsm_ip)
                 # Delete profiles from database if they were deleted in VSM
                 for pid in plugin_profiles_set.difference(
                                                vsm_profiles_set):
                     if not n1kv_db.policy_profile_in_use(pid):
                         self._remove_policy_profile(pid, vsm_ip)
                     else:
                         LOG.warning(_LW('Policy profile %s in use'), pid)
         except (n1kv_exc.VSMError, n1kv_exc.VSMConnectionFailed):
             with excutils.save_and_reraise_exception(reraise=False):
                 LOG.warning(_LW('No policy profile populated from VSM'))
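
The synchronization above is driven by plain set arithmetic: symmetric_difference detects any drift between the VSM and the local database, and the two difference calls split that drift into profiles to add and profiles to remove. A self-contained sketch of the same reconciliation pattern with invented data:

vsm_profiles = {"p1": "gold", "p2": "silver", "p4": "bronze"}  # source of truth
plugin_profile_ids = {"p1", "p2", "p3"}                        # known locally

vsm_profile_ids = set(vsm_profiles)
if vsm_profile_ids.symmetric_difference(plugin_profile_ids):
    to_add = vsm_profile_ids - plugin_profile_ids     # new on VSM
    to_remove = plugin_profile_ids - vsm_profile_ids  # deleted on VSM
    print("add:", sorted(to_add), "remove:", sorted(to_remove))
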
Example #6
 def call_driver(self, action, network, **action_kwargs):
     """Invoke an action on a DHCP driver instance."""
     LOG.debug('Calling driver for network: %(net)s action: %(action)s',
               {'net': network.id, 'action': action})
     try:
         # the Driver expects something that is duck typed similar to
         # the base models.
         driver = self.dhcp_driver_cls(self.conf,
                                       network,
                                       self._process_monitor,
                                       self.dhcp_version,
                                       self.plugin_rpc)
         getattr(driver, action)(**action_kwargs)
         return True
     except exceptions.Conflict:
         # No need to resync here, the agent will receive the event related
         # to a status update for the network
         LOG.warning(_LW('Unable to %(action)s dhcp for %(net_id)s: there '
                         'is a conflict with its current state; please '
                         'check that the network and/or its subnet(s) '
                         'still exist.'),
                     {'net_id': network.id, 'action': action})
     except Exception as e:
         self.schedule_resync(e, network.id)
         if (isinstance(e, oslo_messaging.RemoteError)
             and e.exc_type == 'NetworkNotFound'
             or isinstance(e, exceptions.NetworkNotFound)):
             LOG.warning(_LW("Network %s has been deleted."), network.id)
         else:
             LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'),
                           {'net_id': network.id, 'action': action})
Example #7
def get_nsx_security_group_id(session, cluster, neutron_id):
    """Return the NSX sec profile uuid for a given neutron sec group.

    First, look up the Neutron database. If not found, execute
    a query on NSX platform as the mapping might be missing.
    NOTE: Security groups are called 'security profiles' on the NSX backend.
    """
    nsx_id = nsx_db.get_nsx_security_group_id(session, neutron_id)
    if not nsx_id:
        # Find security profile on backend.
        # This is a rather expensive query, but it won't be executed
        # more than once for each security group in Neutron's lifetime
        nsx_sec_profiles = secgrouplib.query_security_profiles(
            cluster, '*',
            filters={'tag': neutron_id,
                     'tag_scope': 'q_sec_group_id'})
        # Only one result expected
        # NOTE(salv-orlando): Not handling the case where more than one
        # security profile is found with the same neutron port tag
        if not nsx_sec_profiles:
            LOG.warn(_LW("Unable to find NSX security profile for Neutron "
                         "security group %s"), neutron_id)
            return
        elif len(nsx_sec_profiles) > 1:
            LOG.warn(_LW("Multiple NSX security profiles found for Neutron "
                         "security group %s"), neutron_id)
        nsx_sec_profile = nsx_sec_profiles[0]
        nsx_id = nsx_sec_profile['uuid']
        with session.begin(subtransactions=True):
            # Create DB mapping
            nsx_db.add_neutron_nsx_security_group_mapping(
                session, neutron_id, nsx_id)
    return nsx_id
Example #8
 def send_events(self, batched_events):
     LOG.debug("Sending events: %s", batched_events)
     try:
         response = self.nclient.server_external_events.create(
             batched_events)
     except nova_exceptions.NotFound:
         LOG.warning(_LW("Nova returned NotFound for event: %s"),
                     batched_events)
     except Exception:
         LOG.exception(_LE("Failed to notify nova on events: %s"),
                       batched_events)
     else:
         if not isinstance(response, list):
             LOG.error(_LE("Error response returned from nova: %s"),
                       response)
             return
         response_error = False
         for event in response:
             try:
                 code = event['code']
             except KeyError:
                 response_error = True
                 continue
             if code != 200:
                 LOG.warning(_LW("Nova event: %s returned with failed "
                                 "status"), event)
             else:
                 LOG.info(_LI("Nova event response: %s"), event)
         if response_error:
             LOG.error(_LE("Error response returned from nova: %s"),
                       response)
Example #9
    def treat_device(self, device, pci_slot, admin_state_up, spoofcheck=True):
        if self.eswitch_mgr.device_exists(device, pci_slot):
            try:
                self.eswitch_mgr.set_device_spoofcheck(device, pci_slot,
                                                       spoofcheck)
            except Exception:
                LOG.warning(_LW("Failed to set spoofcheck for device %s"),
                            device)
            LOG.info(_LI("Device %(device)s spoofcheck %(spoofcheck)s"),
                     {"device": device, "spoofcheck": spoofcheck})

            try:
                self.eswitch_mgr.set_device_state(device, pci_slot,
                                                  admin_state_up)
            except exc.IpCommandOperationNotSupportedError:
                LOG.warning(_LW("Device %s does not support state change"),
                            device)
            except exc.SriovNicError:
                LOG.warning(_LW("Failed to set device %s state"), device)
                return
            if admin_state_up:
                # update plugin about port status
                self.plugin_rpc.update_device_up(self.context,
                                                 device,
                                                 self.agent_id,
                                                 cfg.CONF.host)
            else:
                self.plugin_rpc.update_device_down(self.context,
                                                   device,
                                                   self.agent_id,
                                                   cfg.CONF.host)
        else:
            LOG.info(_LI("No device with MAC %s defined on agent."), device)
Example #10
 def _load_all_extensions_from_path(self, path):
     # Sorting the extension list makes the order in which they
     # are loaded predictable across a cluster of load-balanced
     # Neutron Servers
     for f in sorted(os.listdir(path)):
         try:
             LOG.debug('Loading extension file: %s', f)
             mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
             ext_path = os.path.join(path, f)
             if file_ext.lower() == '.py' and not mod_name.startswith('_'):
                 mod = imp.load_source(mod_name, ext_path)
                 ext_name = mod_name[0].upper() + mod_name[1:]
                 new_ext_class = getattr(mod, ext_name, None)
                 if not new_ext_class:
                     LOG.warn(_LW('Did not find expected name '
                                  '"%(ext_name)s" in %(file)s'),
                              {'ext_name': ext_name,
                               'file': ext_path})
                     continue
                 new_ext = new_ext_class()
                 self.add_extension(new_ext)
         except Exception as exception:
             LOG.warn(_LW("Extension file %(f)s wasn't loaded due to "
                          "%(exception)s"),
                      {'f': f, 'exception': exception})
Example #11
    def _router_removed(self, router_id, deconfigure=True):
        """Operations when a router is removed.

        Get the RouterInfo object corresponding to the router in the service
        helpers's router_info dict. If deconfigure is set to True,
        remove this router's configuration from the hosting device.
        :param router_id: id of the router
        :param deconfigure: if True, the router's configuration is deleted from
        the hosting device.
        :return: None
        """
        ri = self.router_info.get(router_id)
        if ri is None:
            LOG.warning(_LW("Info for router %s was not found. "
                       "Skipping router removal"), router_id)
            return
        ri.router['gw_port'] = None
        ri.router[l3_constants.INTERFACE_KEY] = []
        ri.router[l3_constants.FLOATINGIP_KEY] = []
        try:
            if deconfigure:
                self._process_router(ri)
                driver = self._drivermgr.get_driver(router_id)
                driver.router_removed(ri, deconfigure)
                self._drivermgr.remove_driver(router_id)
            del self.router_info[router_id]
            self.removed_routers.discard(router_id)
        except cfg_exceptions.DriverException:
            LOG.warning(_LW("Router remove for router_id: %s was incomplete. "
                       "Adding the router to removed_routers list"), router_id)
            self.removed_routers.add(router_id)
            # remove this router from updated_routers if it is there. It might
            # end up there too if exception was thrown earlier inside
            # `_process_router()`
            self.updated_routers.discard(router_id)
Example #12
File: ovs_lib.py  Project: gampel/neutron
 def get_vif_port_set(self):
     edge_ports = set()
     args = ['--format=json', '--', '--columns=external_ids,ofport',
             '--if-exists', 'list', 'Interface']
     args += self.get_port_name_list()
     result = self.run_vsctl(args, check_error=True)
     if not result:
         return edge_ports
     for row in jsonutils.loads(result)['data']:
         external_ids = dict(row[0][1])
         # Do not consider VIFs which aren't yet ready
         # This can happen when ofport values are either [] or ["set", []]
         # We will therefore consider only integer values for ofport
         ofport = row[1]
         if not isinstance(ofport, numbers.Integral):
             LOG.warn(_LW("Found not yet ready openvswitch port: %s"), row)
         elif ofport < 1:
             LOG.warn(_LW("Found failed openvswitch port: %s"), row)
         elif 'attached-mac' in external_ids:
             if "iface-id" in external_ids:
                 edge_ports.add(external_ids['iface-id'])
             elif "xs-vif-uuid" in external_ids:
                 # if this is a xenserver and iface-id is not
                 # automatically synced to OVS from XAPI, we grab it
                 # from XAPI directly
                 iface_id = self.get_xapi_iface_id(
                     external_ids["xs-vif-uuid"])
                 edge_ports.add(iface_id)
     return edge_ports
Example #13
    def get_ml2_port_bond_data(self, ctx, port_id, device_id):
        core_plugin = manager.NeutronManager.get_plugin()
        port_context = core_plugin.get_bound_port_context(
            ctx, port_id, device_id)
        if not port_context:
            LOG.warning(_LW("Device %(device)s requested by agent "
                         "%(agent_id)s not found in database"),
                        {'device': device_id, 'agent_id': port_id})
            return None

        port = port_context.current

        try:
            segment = port_context.network.network_segments[0]
        except IndexError:
            LOG.warning(_LW("Device %(device)s requested by agent "
                            "on network %(network_id)s not bound"),
                        {'device': device_id,
                         'network_id': port['network_id']})
            return {}

        entry = {'device': device_id,
                 'network_id': port['network_id'],
                 'port_id': port_id,
                 'mac_address': port['mac_address'],
                 'admin_state_up': port['admin_state_up'],
                 'network_type': segment[api.NETWORK_TYPE],
                 'segmentation_id': segment[api.SEGMENTATION_ID],
                 'physical_network': segment[api.PHYSICAL_NETWORK],
                 'fixed_ips': port['fixed_ips'],
                 'device_owner': port['device_owner']}
        LOG.debug(("Returning: %s"), entry)
        return entry
Example #14
    def _sync_ports(self, ctx):
        LOG.debug("OVN-NB Sync ports started")

        lports = self.ovn_api.get_all_logical_ports_ids()

        for port in self.core_plugin.get_ports(ctx):
            _, binding = l2_db.get_locked_port_and_binding(ctx.session, port["id"])
            network = self.core_plugin.get_network(ctx, port["network_id"])
            port_context = driver_context.PortContext(self.core_plugin, ctx, port, network, binding, [])
            try:
                if self.mode == SYNC_MODE_REPAIR:
                    self.driver.create_port_postcommit(port_context)
                res = lports.pop(port_context.current["id"], None)
                if self.mode == SYNC_MODE_LOG:
                    if res is None:
                        LOG.warn(
                            _LW("Port found in Neutron but not in OVN" "DB, port_id=%s"), port_context.current["id"]
                        )

            except RuntimeError:
                LOG.warn(_LW("Create port postcommit failed for" " port %s"), port["id"])

        # Only delete logical port if it was previously created by neutron
        with self.ovn_api.transaction() as txn:
            for lport, ext_ids in lports.items():
                if ovn_const.OVN_PORT_NAME_EXT_ID_KEY in ext_ids:
                    if self.mode == SYNC_MODE_REPAIR:
                        txn.add(self.ovn_api.delete_lport(lport))
                    if self.mode == SYNC_MODE_LOG:
                        LOG.warn(
                            _LW("Port found in OVN but not in Neutron," " port_name=%s"),
                            ext_ids[ovn_const.OVN_PORT_NAME_EXT_ID_KEY],
                        )

        LOG.debug("OVN-NB Sync ports finished")
Example #15
    def _sync_networks(self, ctx):
        LOG.debug("OVN-NB Sync networks started")

        lswitches = self.ovn_api.get_all_logical_switches_ids()

        for network in self.core_plugin.get_networks(ctx):
            net_context = driver_context.NetworkContext(self.core_plugin, ctx, network)
            try:
                if self.mode == SYNC_MODE_REPAIR:
                    self.driver.create_network_postcommit(net_context)
                res = lswitches.pop(utils.ovn_name(net_context.current["id"]), None)
                if self.mode == SYNC_MODE_LOG:
                    if res is None:
                        LOG.warn(
                            _LW("Network found in Neutron but not in OVN" "DB, network_id=%s"),
                            net_context.current["id"],
                        )

            except RuntimeError:
                LOG.warn(_LW("Create network postcommit failed for " "network %s"), network["id"])

        # Only delete logical switch if it was previously created by neutron
        with self.ovn_api.transaction() as txn:
            for lswitch, ext_ids in lswitches.items():
                if ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY in ext_ids:
                    if self.mode == SYNC_MODE_REPAIR:
                        txn.add(self.ovn_api.delete_lswitch(lswitch))
                    if self.mode == SYNC_MODE_LOG:
                        LOG.warn(
                            _LW("Network found in OVN but not in Neutron," " network_name=%s"),
                            (ext_ids[ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY]),
                        )

        LOG.debug("OVN-NB Sync networks finished")
Example #16
 def _recv_data(self):
     chunks = []
     lc = rc = 0
     prev_char = None
     while True:
         try:
             response = self.socket.recv(n_const.BUFFER_SIZE)
             if response:
                 response = response.decode('utf8')
                 for i, c in enumerate(response):
                     if c == '{' and not (prev_char and
                                          prev_char == '\\'):
                         lc += 1
                     elif c == '}' and not (prev_char and
                                            prev_char == '\\'):
                         rc += 1
                     if lc == rc and lc != 0:
                         chunks.append(response[0:i + 1])
                         message = "".join(chunks)
                         return message
                     prev_char = c
                 chunks.append(response)
             else:
                 LOG.warning(_LW("Did not receive any reply from the OVSDB "
                                 "server"))
                 return
         except (socket.error, socket.timeout):
             LOG.warning(_LW("Did not receive any reply from the OVSDB "
                             "server"))
             return
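
_recv_data frames one complete JSON message by counting unescaped '{' and '}' characters across successive recv() chunks and returning once the counts balance. Below is a rough standalone sketch of the same brace-balancing idea, minus the socket handling; like the counter above, it assumes the payload is a single JSON object and does not special-case braces inside string values.

def extract_json_message(stream_chunks):
    """Return the first balanced {...} message found in the chunks."""
    chunks = []
    depth = 0
    prev_char = None
    for chunk in stream_chunks:
        for i, ch in enumerate(chunk):
            if ch == '{' and prev_char != '\\':
                depth += 1
            elif ch == '}' and prev_char != '\\':
                depth -= 1
                if depth == 0:
                    chunks.append(chunk[:i + 1])
                    return "".join(chunks)
            prev_char = ch
        chunks.append(chunk)
    return None

print(extract_json_message(['{"method": "echo", ', '"params": {"x": 1}}']))
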
Example #17
 def update_status(self, context, obj_type, obj_id,
                   provisioning_status=None, operating_status=None):
     if not provisioning_status and not operating_status:
         LOG.warning(_LW('update_status for %(obj_type)s %(obj_id)s called '
                         'without specifying provisioning_status or '
                         'operating_status') % {'obj_type': obj_type,
                                                'obj_id': obj_id})
         return
     model_mapping = {
         'loadbalancer': db_models.LoadBalancer,
         'pool': db_models.PoolV2,
         'listener': db_models.Listener,
         'member': db_models.MemberV2,
         'healthmonitor': db_models.HealthMonitorV2
     }
     if obj_type not in model_mapping:
         raise n_exc.Invalid(_('Unknown object type: %s') % obj_type)
     try:
         self.plugin.db.update_status(
             context, model_mapping[obj_type], obj_id,
             provisioning_status=provisioning_status,
             operating_status=operating_status)
     except n_exc.NotFound:
         # update_status may come from agent on an object which was
         # already deleted from db with other request
         LOG.warning(_LW('Cannot update status: %(obj_type)s %(obj_id)s '
                         'not found in the DB, it was probably deleted '
                         'concurrently'),
                     {'obj_type': obj_type, 'obj_id': obj_id})
Example #18
    def _sync_base(self):
        ctx = context.get_admin_context()
        # Sync Networks
        for network in self.core_plugin.get_networks(ctx):
            mech_context = driver_context.NetworkContext(self.core_plugin, ctx,
                                                         network)
            try:
                self.driver.create_network_postcommit(mech_context)
            except Exception:
                LOG.warn(_LW("Create network postcommit failed for "
                             "network %s"), network['id'])

        # Sync Subnets
        for subnet in self.core_plugin.get_subnets(ctx):
            mech_context = driver_context.SubnetContext(self.core_plugin, ctx,
                                                        subnet)
            try:
                self.driver.create_subnet_postcommit(mech_context)
            except Exception:
                LOG.warn(_LW("Create subnet postcommit failed for"
                             " subnet %s"), subnet['id'])

        # Sync Ports (compute/gateway/dhcp)
        for port in self.core_plugin.get_ports(ctx):
            _, binding = l2_db.get_locked_port_and_binding(ctx.session,
                                                           port['id'])
            network = self.core_plugin.get_network(ctx, port['network_id'])
            mech_context = driver_context.PortContext(self.core_plugin, ctx,
                                                      port, network, binding,
                                                      [])
            try:
                self.driver.create_port_postcommit(mech_context)
            except Exception:
                LOG.warn(_LW("Create port postcommit failed for"
                             " port %s"), port['id'])
Example #19
    def _get_port_infos(self, context, port, agent_host):
        if not agent_host:
            return

        session = db_api.get_session()
        agent = self.get_agent_by_host(session, agent_host)
        if not agent:
            return

        agent_ip = self.get_agent_ip(agent)
        if not agent_ip:
            LOG.warning(_LW("Unable to retrieve the agent ip, check the agent "
                            "configuration."))
            return

        segment = context.bottom_bound_segment
        if not segment:
            LOG.warning(_LW("Port %(port)s updated by agent %(agent)s "
                            "isn't bound to any segment"),
                        {'port': port['id'], 'agent': agent})
            return

        network_types = self.get_agent_l2pop_network_types(agent)
        if network_types is None:
            network_types = self.get_agent_tunnel_types(agent)
        if segment['network_type'] not in network_types:
            return

        fdb_entries = self._get_port_fdb_entries(port)

        return agent, agent_host, agent_ip, segment, fdb_entries
Example #20
    def get_candidates(self, plugin, context, sync_router):
        """Return L3 agents where a router could be scheduled."""
        with context.session.begin(subtransactions=True):
            # Allow the router to be hosted by just one enabled L3 agent;
            # whether that agent is currently active is only a timing issue,
            # since a non-active L3 agent can become active again at any time.
            l3_agents = plugin.get_l3_agents_hosting_routers(
                context, [sync_router['id']], admin_state_up=True)
            if l3_agents and not sync_router.get('distributed', False):
                LOG.debug('Router %(router_id)s has already been hosted'
                          ' by L3 agent %(agent_id)s',
                          {'router_id': sync_router['id'],
                           'agent_id': l3_agents[0]['id']})
                return

            active_l3_agents = plugin.get_l3_agents(context, active=True)
            if not active_l3_agents:
                LOG.warn(_LW('No active L3 agents'))
                return
            new_l3agents = plugin.get_l3_agent_candidates(context,
                                                          sync_router,
                                                          active_l3_agents)
            old_l3agentset = set(l3_agents)
            if sync_router.get('distributed', False):
                new_l3agentset = set(new_l3agents)
                candidates = list(new_l3agentset - old_l3agentset)
            else:
                candidates = new_l3agents
                if not candidates:
                    LOG.warn(_LW('No L3 agents can host the router %s'),
                             sync_router['id'])

            return candidates
Example #21
    def _get_portpair_detail_info(self, portpair_id):
        """Get port detail.

        @param: portpair_id: uuid
        @return: (host_id, local_ip, network_type, segment_id,
        service_insert_type): tuple
        """

        core_plugin = manager.NeutronManager.get_plugin()
        port_detail = core_plugin.get_port(self.admin_context, portpair_id)
        host_id, local_ip, network_type, segment_id, mac_address = (
            (None, ) * 5)

        if port_detail:
            host_id = port_detail['binding:host_id']
            network_id = port_detail['network_id']
            mac_address = port_detail['mac_address']
            network_info = core_plugin.get_network(
                self.admin_context, network_id)
            network_type = network_info['provider:network_type']
            segment_id = network_info['provider:segmentation_id']

        if network_type != np_const.TYPE_VXLAN:
            LOG.warn(_LW("Currently only support vxlan network"))
            return ((None, ) * 5)
        elif not host_id:
            LOG.warn(_LW("This port has not been binding"))
            return ((None, ) * 5)
        else:
            driver = core_plugin.type_manager.drivers.get(network_type)
            host_endpoint = driver.obj.get_endpoint_by_host(host_id)
            local_ip = host_endpoint['ip_address']

        return host_id, local_ip, network_type, segment_id, mac_address
Example #22
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""
        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents('L3', agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = n_ctx.get_admin_context()
        down_bindings = (
            context.session.query(RouterL3AgentBinding).
            join(agents_db.Agent).
            filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                   agents_db.Agent.admin_state_up).
            outerjoin(l3_attrs_db.RouterExtraAttributes,
                      l3_attrs_db.RouterExtraAttributes.router_id ==
                      RouterL3AgentBinding.router_id).
            filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                          l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
        try:
            agents_back_online = set()
            for binding in down_bindings:
                if binding.l3_agent_id in agents_back_online:
                    continue
                else:
                    agent = self._get_agent(context, binding.l3_agent_id)
                    if agent.is_active:
                        agents_back_online.add(binding.l3_agent_id)
                        continue

                agent_mode = self._get_agent_mode(binding.l3_agent)
                if agent_mode == constants.L3_AGENT_MODE_DVR:
                    # rescheduling from l3 dvr agent on compute node doesn't
                    # make sense. Router will be removed from that agent once
                    # there are no dvr serviceable ports on that compute node
                    LOG.warn(_LW('L3 DVR agent on node %(host)s is down. '
                                 'Not rescheduling from agent in \'dvr\' '
                                 'mode.'), {'host': binding.l3_agent.host})
                    continue
                LOG.warn(_LW(
                    "Rescheduling router %(router)s from agent %(agent)s "
                    "because the agent did not report to the server in "
                    "the last %(dead_time)s seconds."),
                    {'router': binding.router_id,
                     'agent': binding.l3_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed,
                        oslo_messaging.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"),
                                  binding.router_id)
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(_LE("Exception encountered during router "
                              "rescheduling."))
Example #23
    def _redirect_params(self, conn, headers, allow_release_conn=False):
        """Process redirect response, create new connection if necessary.

        Args:
            conn: connection that returned the redirect response
            headers: response headers of the redirect response
            allow_release_conn: if redirecting to a different server,
                release existing connection back to connection pool.

        Returns: Return tuple(conn, url) where conn is a connection object
            to the redirect target and url is the path of the API request
        """

        url = None
        for name, value in headers:
            if name.lower() == "location":
                url = value
                break
        if not url:
            LOG.warn(_LW("[%d] Received redirect status without location "
                         "header field"), self._rid())
            return (conn, None)
        # Accept location with the following format:
        # 1. /path, redirect to same node
        # 2. scheme://hostname:[port]/path where scheme is https or http
        # Reject others
        # 3. e.g. relative paths, unsupported scheme, unspecified host
        result = urlparse.urlparse(url)
        if not result.scheme and not result.hostname and result.path:
            if result.path[0] == "/":
                if result.query:
                    url = "%s?%s" % (result.path, result.query)
                else:
                    url = result.path
                return (conn, url)      # case 1
            else:
                LOG.warn(_LW("[%(rid)d] Received invalid redirect location: "
                             "'%(url)s'"), {'rid': self._rid(), 'url': url})
                return (conn, None)     # case 3
        elif result.scheme not in ["http", "https"] or not result.hostname:
            LOG.warn(_LW("[%(rid)d] Received malformed redirect "
                         "location: %(url)s"),
                     {'rid': self._rid(), 'url': url})
            return (conn, None)         # case 3
        # case 2, redirect location includes a scheme
        # so setup a new connection and authenticate
        if allow_release_conn:
            self._api_client.release_connection(conn)
        conn_params = (result.hostname, result.port, result.scheme == "https")
        conn = self._api_client.acquire_redirect_connection(conn_params, True,
                                                            self._headers)
        if result.query:
            url = "%s?%s" % (result.path, result.query)
        else:
            url = result.path
        return (conn, url)
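
The branching above is easier to see with urlparse on its own: a bare /path has neither scheme nor hostname, while a usable absolute URL carries both. A small standalone classification helper along those lines (Python 3 urllib.parse; the URLs and return strings are purely illustrative):

from urllib.parse import urlparse

def classify_redirect(url):
    result = urlparse(url)
    if not result.scheme and not result.hostname and result.path:
        if result.path.startswith("/"):
            return "same node, reuse connection"    # case 1
        return "invalid relative path"              # case 3
    if result.scheme not in ("http", "https") or not result.hostname:
        return "malformed location"                 # case 3
    return "new connection required"                # case 2

print(classify_redirect("/ws.v1/lswitch?fields=uuid"))        # case 1
print(classify_redirect("https://controller/ws.v1/lswitch"))  # case 2
print(classify_redirect("ftp://host/path"))                   # case 3
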
Example #24
def validate_nsxv_config_options():
    if (cfg.CONF.nsxv.manager_uri is None or
        cfg.CONF.nsxv.user is None or
        cfg.CONF.nsxv.password is None):
        error = _("manager_uri, user and password must be configured!")
        raise nsx_exc.NsxPluginException(err_msg=error)
    if cfg.CONF.nsxv.dvs_id is None:
        LOG.warning(_LW("dvs_id must be configured to support VLAN's!"))
    if cfg.CONF.nsxv.vdn_scope_id is None:
        LOG.warning(_LW("vdn_scope_id must be configured to support VXLAN's!"))
Example #25
    def _warn_on_state_status(self, resource):
        if resource.get('admin_state_up', True) is False:
            LOG.warning(_LW("Setting admin_state_up=False is not supported "
                            "in this plugin version. Ignoring setting for "
                            "resource: %s"), resource)

        if 'status' in resource:
            if resource['status'] != const.NET_STATUS_ACTIVE:
                LOG.warning(_LW("Operational status is internally set by the "
                                "plugin. Ignoring setting status=%s."),
                            resource['status'])
Example #26
    def check_connections(self):
        """Check connection between Openstack to Nexus device."""
        switch_connections = self._mdriver.get_switch_state()

        for switch_ip in switch_connections:
            state = self._mdriver.get_switch_ip_and_active_state(switch_ip)
            retry_count = self._mdriver.get_switch_retry_count(switch_ip)
            cfg_retry = conf.cfg.CONF.ml2_cisco.switch_replay_count
            if retry_count > cfg_retry:
                continue
            if retry_count == cfg_retry:
                LOG.warn(_LW("check_connections() switch "
                         "%(switch_ip)s retry count %(rcnt)d exceeded "
                         "configured threshold %(thld)d"),
                         {'switch_ip': switch_ip,
                         'rcnt': retry_count,
                         'thld': cfg_retry})
                self._mdriver.incr_switch_retry_count(switch_ip)
                continue
            LOG.debug("check_connections() switch "
                      "%(switch_ip)s state %(state)d",
                      {'switch_ip': switch_ip, 'state': state})
            try:
                nexus_type = self._driver.get_nexus_type(switch_ip)
            except Exception:
                if state is True:
                    LOG.error(_LE("Lost connection to switch ip "
                        "%(switch_ip)s"), {'switch_ip': switch_ip})
                    self._mdriver.set_switch_ip_and_active_state(
                        switch_ip, False)
            else:
                if state is False:
                    self._configure_nexus_type(switch_ip, nexus_type)
                    LOG.info(_LI("Re-established connection to switch "
                        "ip %(switch_ip)s"),
                        {'switch_ip': switch_ip})
                    self._mdriver.set_switch_ip_and_active_state(
                        switch_ip, True)
                    self.replay_config(switch_ip)
                    # If replay failed, it stops trying to configure db entries
                    # and sets switch state to False so this caller knows
                    # it failed.  If it did fail, we increment the
                    # retry counter else reset it to 0.
                    if self._mdriver.get_switch_ip_and_active_state(
                        switch_ip) is False:
                        self._mdriver.incr_switch_retry_count(switch_ip)
                        LOG.warn(_LW("Replay config failed for "
                            "ip %(switch_ip)s"),
                            {'switch_ip': switch_ip})
                    else:
                        self._mdriver.reset_switch_retry_count(switch_ip)
                        LOG.info(_LI("Replay config successful for "
                            "ip %(switch_ip)s"),
                            {'switch_ip': switch_ip})
Example #27
    def get_device_details(self, rpc_context, **kwargs):
        """Agent requests device details."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        host = kwargs.get('host')
        LOG.debug("Device %(device)s details requested by agent "
                  "%(agent_id)s with host %(host)s",
                  {'device': device, 'agent_id': agent_id, 'host': host})

        plugin = manager.NeutronManager.get_plugin()
        port_id = plugin._device_to_port_id(device)
        port_context = plugin.get_bound_port_context(rpc_context,
                                                     port_id,
                                                     host)
        if not port_context:
            LOG.warning(_LW("Device %(device)s requested by agent "
                            "%(agent_id)s not found in database"),
                        {'device': device, 'agent_id': agent_id})
            return {'device': device}

        segment = port_context.bound_segment
        port = port_context.current

        if not segment:
            LOG.warning(_LW("Device %(device)s requested by agent "
                            "%(agent_id)s on network %(network_id)s not "
                            "bound, vif_type: %(vif_type)s"),
                        {'device': device,
                         'agent_id': agent_id,
                         'network_id': port['network_id'],
                         'vif_type': port[portbindings.VIF_TYPE]})
            return {'device': device}

        new_status = (q_const.PORT_STATUS_BUILD if port['admin_state_up']
                      else q_const.PORT_STATUS_DOWN)
        if port['status'] != new_status:
            plugin.update_port_status(rpc_context,
                                      port_id,
                                      new_status,
                                      host)

        entry = {'device': device,
                 'network_id': port['network_id'],
                 'port_id': port_id,
                 'mac_address': port['mac_address'],
                 'admin_state_up': port['admin_state_up'],
                 'network_type': segment[api.NETWORK_TYPE],
                 'segmentation_id': segment[api.SEGMENTATION_ID],
                 'physical_network': segment[api.PHYSICAL_NETWORK],
                 'fixed_ips': port['fixed_ips'],
                 'device_owner': port['device_owner'],
                 'profile': port[portbindings.PROFILE]}
        LOG.debug("Returning: %s", entry)
        return entry
Example #28
    def _destroy_namespace_and_port(self):
        try:
            self.device_manager.destroy(self.network, self.interface_name)
        except RuntimeError:
            LOG.warning(_LW("Failed trying to delete interface: %s"), self.interface_name)

        if self.network.namespace:
            ns_ip = ip_lib.IPWrapper(namespace=self.network.namespace)
            try:
                ns_ip.netns.delete(self.network.namespace)
            except RuntimeError:
                LOG.warning(_LW("Failed trying to delete namespace: %s"), self.network.namespace)
Example #29
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""

        # give agents extra time to handle transient failures
        agent_dead_limit = cfg.CONF.agent_down_time * 2

        # check for an abrupt clock change since last check. if a change is
        # detected, sleep for a while to let the agents check in.
        tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary',
                                              timeutils.utcnow())
        if timeutils.total_seconds(tdelta) > cfg.CONF.agent_down_time:
            LOG.warn(_LW("Time since last L3 agent reschedule check has "
                         "exceeded the interval between checks. Waiting "
                         "before check to allow agents to send a heartbeat "
                         "in case there was a clock adjustment."))
            time.sleep(agent_dead_limit)
        self._clock_jump_canary = timeutils.utcnow()

        context = n_ctx.get_admin_context()
        cutoff = timeutils.utcnow() - datetime.timedelta(
            seconds=agent_dead_limit)
        down_bindings = (
            context.session.query(RouterL3AgentBinding).
            join(agents_db.Agent).
            filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                   agents_db.Agent.admin_state_up).
            outerjoin(l3_attrs_db.RouterExtraAttributes,
                      l3_attrs_db.RouterExtraAttributes.router_id ==
                      RouterL3AgentBinding.router_id).
            filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                          l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
        try:
            for binding in down_bindings:
                LOG.warn(_LW(
                    "Rescheduling router %(router)s from agent %(agent)s "
                    "because the agent did not report to the server in "
                    "the last %(dead_time)s seconds."),
                    {'router': binding.router_id,
                     'agent': binding.l3_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed,
                        messaging.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"),
                                  binding.router_id)
        except db_exc.DBError:
            # Catch DB errors here so a transient DB connectivity issue
            # doesn't stop the loopingcall.
            LOG.exception(_LE("Exception encountered during router "
                              "rescheduling."))
Example #30
 def call(self, action, resource, data, headers, binary=False):
     resp = self._call(action, resource, data, headers, binary)
     if resp[RESP_STATUS] == -1:
         LOG.warning(_LW('vDirect server is not responding (%s).'),
                     self.server)
         return self._recover(action, resource, data, headers, binary)
     elif resp[RESP_STATUS] in (301, 307):
         LOG.warning(_LW('vDirect server is not active (%s).'),
                     self.server)
         return self._recover(action, resource, data, headers, binary)
     else:
         return resp
Example #31
    def _schedule_network(self, context, network, existing_agents):
        """Schedule the network to new agents

        :return: all agents associated with the network
        """
        new_agents = self.plugin.schedule_network(context, network) or []
        if new_agents:
            for agent in new_agents:
                self._cast_message(context, 'network_create_end',
                                   {'network': {
                                       'id': network['id']
                                   }}, agent['host'])
        elif not existing_agents:
            LOG.warn(
                _LW('Unable to schedule network %s: no agents available; '
                    'will retry on subsequent port and subnet creation '
                    'events.'), network['id'])
        return new_agents + existing_agents
Example #32
def check_read_netns():
    required = checks.netns_read_requires_helper()
    if not required and cfg.CONF.AGENT.use_helper_for_ns_read:
        LOG.warning(_LW("The user that is executing neutron can read the "
                        "namespaces without using the root_helper. Disable "
                        "the use_helper_for_ns_read option to avoid a "
                        "performance impact."))
        # Don't fail because nothing is actually broken. Just not optimal.
        result = True
    elif required and not cfg.CONF.AGENT.use_helper_for_ns_read:
        LOG.error(_LE("The user that is executing neutron does not have "
                      "permissions to read the namespaces. Enable the "
                      "use_helper_for_ns_read configuration option."))
        result = False
    else:
        # everything is configured appropriately
        result = True
    return result
Example #33
 def get_network_info(self, context, **kwargs):
     """Retrieve and return a extended information about a network."""
     network_id = kwargs.get('network_id')
     host = kwargs.get('host')
     LOG.debug('Network %(network_id)s requested from '
               '%(host)s', {'network_id': network_id,
                            'host': host})
     plugin = manager.NeutronManager.get_plugin()
     try:
         network = plugin.get_network(context, network_id)
     except n_exc.NetworkNotFound:
         LOG.warn(_LW("Network %s could not be found, it might have "
                      "been deleted concurrently."), network_id)
         return
     filters = dict(network_id=[network_id])
     network['subnets'] = plugin.get_subnets(context, filters=filters)
     network['ports'] = plugin.get_ports(context, filters=filters)
     return network
Example #34
    def get_dvr_mac_address(self):
        try:
            self.get_dvr_mac_address_with_retry()
        except oslo_messaging.RemoteError as e:
            LOG.warning(_LW('L2 agent could not get DVR MAC address at '
                            'startup due to RPC error.  It happens when the '
                            'server does not support this RPC API.  Detailed '
                            'message: %s'), e)
        except oslo_messaging.MessagingTimeout:
            LOG.error(_LE('DVR: Failed to obtain a valid local '
                          'DVR MAC address - L2 Agent operating '
                          'in Non-DVR Mode'))

        if not self.in_distributed_mode():
            # switch all traffic using L2 learning
            # REVISIT(yamamoto): why to install the same flow as
            # setup_integration_br?
            self.int_br.install_normal()
Example #35
 def _get_dvs_for_port_id(self, port_id, p_key=None):
     # Check if port is already known
     known_ports = (set.union(*self.dvs_port_map.values())
                    if self.dvs_port_map.values() else {})
     # If port is not known - get fresh port_map from vCenter
     if port_id not in known_ports:
         if p_key:
             dvs = dvs_util.get_dvs_by_id_and_key(
                 self.networking_map.values(), port_id, p_key)
             if dvs:
                 return self._get_dvs_and_put_dvs_in_port_map(dvs, port_id)
         port_map = dvs_util.create_port_map(self.networking_map.values())
     else:
         port_map = self.dvs_port_map
     for dvs, port_list in port_map.iteritems():
         if port_id in port_list:
             return self._get_dvs_and_put_dvs_in_port_map(dvs, port_id)
     LOG.warning(_LW("Can find dvs for port %s"), port_id)
Example #36
 def __init__(self, interface_mappings):
     self.interface_mappings = interface_mappings
     self.ip = ip_lib.IPWrapper()
     # VXLAN related parameters:
     self.local_ip = cfg.CONF.VXLAN.local_ip
     self.vxlan_mode = lconst.VXLAN_NONE
     if cfg.CONF.VXLAN.enable_vxlan:
         device = self.ip.get_device_by_ip(self.local_ip)
         if device:
             self.local_int = device.name
             self.check_vxlan_support()
         else:
             self.local_int = None
             LOG.warning(
                 _LW('VXLAN is enabled, a valid local_ip '
                     'must be provided'))
     # Store network mapping to segments
     self.network_map = {}
Example #37
    def _get_ports_from_server(self,
                               router_id=None,
                               ip_address=None,
                               networks=None):
        """Either get ports from server by RPC or fallback to neutron client"""
        filters = self._get_port_filters(router_id, ip_address, networks)
        if self.use_rpc:
            try:
                return self.plugin_rpc.get_ports(self.context, filters)
            except (oslo_messaging.MessagingException, AttributeError):
                # TODO(obondarev): remove fallback once RPC is proven
                # to work fine with metadata agent (K or L release at most)
                LOG.warning(
                    _LW('Server does not support metadata RPC, '
                        'fallback to using neutron client'))
                self.use_rpc = False

        return self._get_ports_using_client(filters)
Example #38
 def _get_lla_gateway_ip_for_subnet(self, context, subnet):
     query = context.session.query(models_v2.Port.mac_address)
     query = query.join(models_v2.IPAllocation)
     query = query.filter(models_v2.IPAllocation.subnet_id == subnet['id'])
     query = query.filter(
         models_v2.IPAllocation.ip_address == subnet['gateway_ip'])
     query = query.filter(
         models_v2.Port.device_owner.in_(q_const.ROUTER_INTERFACE_OWNERS))
     try:
         mac_address = query.one()[0]
     except (exc.NoResultFound, exc.MultipleResultsFound):
         LOG.warn(
             _LW('No valid gateway port on subnet %s is '
                 'found for IPv6 RA'), subnet['id'])
         return
     lla_ip = str(
         ipv6.get_ipv6_addr_by_EUI64(q_const.IPV6_LLA_PREFIX, mac_address))
     return lla_ip
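
get_ipv6_addr_by_EUI64 combines the IPv6 link-local prefix with an interface identifier derived from the gateway port's MAC address. The sketch below is a rough re-derivation of the standard modified EUI-64 rule (flip the universal/local bit, insert ff:fe in the middle) for illustration only; it is not the neutron helper itself.

import ipaddress

def mac_to_lla(mac):
    """Derive an IPv6 link-local address from a MAC via modified EUI-64."""
    octets = [int(part, 16) for part in mac.split(":")]
    octets[0] ^= 0x02                       # flip the universal/local bit
    eui64 = octets[:3] + [0xff, 0xfe] + octets[3:]
    groups = ["%02x%02x" % (eui64[i], eui64[i + 1]) for i in range(0, 8, 2)]
    return str(ipaddress.IPv6Address("fe80::" + ":".join(groups)))

print(mac_to_lla("52:54:00:12:34:56"))   # fe80::5054:ff:fe12:3456
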
Example #39
 def treat_vif_port(self, vif_port, port_id, network_id, network_type,
                    physical_network, segmentation_id, admin_state_up):
     if vif_port:
         # When this function is called for a port, the port should have
         # an OVS ofport configured, as only these ports were considered
         # for being treated. If that does not happen, it is a potential
         # error condition of which operators should be aware
         if not vif_port.ofport:
             LOG.warn(_LW("VIF port: %s has no ofport configured, "
                          "and might not be able to transmit"),
                      vif_port.port_name)
         if admin_state_up:
             self.port_bound(vif_port, network_id, network_type,
                             physical_network, segmentation_id)
         else:
             self.port_dead(vif_port)
     else:
         LOG.debug("No VIF port for port %s defined on agent.", port_id)
Example #40
    def _get_subnet_data(self, subnet_id, get_mapping=True):
        subnet = None
        subl2dom = None
        try:
            if get_mapping:
                subl2dom_db = nuagedb.get_subnet_l2dom_with_lock(
                    self.context.session,
                    subnet_id)
                subl2dom = nuagedb.make_subnl2dom_dict(subl2dom_db)

            subnet_db = nuagedb.get_subnet_with_lock(self.context.session,
                                                     subnet_id)
            subnet = self._make_subnet_dict(subnet_db)
        except db_exc.NoResultFound:
            LOG.warning(_LW("Subnet %s not found in neutron for sync"),
                        subnet_id)

        return subnet, subl2dom
Example #41
 def init_firewall(self, defer_refresh_firewall=False):
     firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver
     LOG.debug("Init firewall settings (driver=%s)", firewall_driver)
     if not _is_valid_driver_combination():
         LOG.warn(_LW("Driver configuration doesn't match "
                      "with enable_security_group"))
     if not firewall_driver:
         firewall_driver = 'neutron.agent.firewall.NoopFirewallDriver'
     self.firewall = importutils.import_object(firewall_driver)
     # The following flag will be set to true if port filter must not be
     # applied as soon as a rule or membership notification is received
     self.defer_refresh_firewall = defer_refresh_firewall
     # Stores devices for which firewall should be refreshed when
     # deferred refresh is enabled.
     self.devices_to_refilter = set()
     # Flag raised when a global refresh is needed
     self.global_refresh_firewall = False
     self._use_enhanced_rpc = None
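The firewall driver is loaded from a dotted-path string with oslo's importutils.import_object. A minimal sketch of what that dynamic loading amounts to, using only the standard library:

import importlib


def import_object(dotted_path, *args, **kwargs):
    """Instantiate a class given its 'package.module.Class' path."""
    module_name, _, class_name = dotted_path.rpartition(".")
    module = importlib.import_module(module_name)
    return getattr(module, class_name)(*args, **kwargs)


# e.g. firewall = import_object("neutron.agent.firewall.NoopFirewallDriver")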
Example #42
 def _sync_delete_networks(self, combined_res_info, vsm_ip):
     """Sync networks by deleting extraneous ones from VSM."""
     (vsm_net_uuids, neutron_nets) = combined_res_info
     neutron_net_uuids = set(self._get_uuids(
         n1kv_const.NETWORKS, neutron_nets))
     for net_id in vsm_net_uuids - neutron_net_uuids:
         # delete these networks from VSM
         bd_name = net_id + n1kv_const.BRIDGE_DOMAIN_SUFFIX
         if bd_name in self.bd_names:
             segment_type = p_const.TYPE_VXLAN
         else:
             segment_type = p_const.TYPE_VLAN
         try:
             self.n1kvclient.delete_network_segment(net_id, segment_type,
                                                    vsm_ip=vsm_ip)
         except (n1kv_exc.VSMError, n1kv_exc.VSMConnectionFailed):
             LOG.warning(_LW('Sync exception: Network delete failed for '
                             '%s.'), net_id)
Example #43
 def _sync_create_networks(self, combined_res_info, vsm_ip):
     """Sync networks by creating missing ones on VSM."""
     (vsm_net_uuids, neutron_nets) = combined_res_info
     for network in neutron_nets:
         if network['id'] not in vsm_net_uuids:
             network_profile = n1kv_db.get_network_profile_by_network(
                 network['id'])
             binding = n1kv_db.get_network_binding(network['id'])
             network[providernet.SEGMENTATION_ID] = binding.segmentation_id
             network[providernet.NETWORK_TYPE] = binding.network_type
             # create these networks on VSM
             try:
                 self.n1kvclient.create_network_segment(network,
                                                        network_profile,
                                                        vsm_ip=vsm_ip)
             except (n1kv_exc.VSMError, n1kv_exc.VSMConnectionFailed):
                 LOG.warning(_LW('Sync exception: Network create failed '
                                 'for %s.'), network['id'])
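Examples #42 and #43 are the two halves of one reconcile loop: delete what the VSM has but Neutron does not, create what Neutron has but the VSM does not, and never let one failed item abort the sync. A generic sketch of that pattern, with hypothetical create/delete callables:

import logging

LOG = logging.getLogger(__name__)


def reconcile(local_ids, remote_ids, create, delete):
    """Bring a remote inventory in line with the local source of truth.

    create/delete are hypothetical callables taking one id; a failure on
    one item is logged and must not abort the rest of the sync.
    """
    local_ids, remote_ids = set(local_ids), set(remote_ids)
    for extraneous in remote_ids - local_ids:
        try:
            delete(extraneous)
        except Exception:
            LOG.warning("Sync: delete failed for %s", extraneous)
    for missing in local_ids - remote_ids:
        try:
            create(missing)
        except Exception:
            LOG.warning("Sync: create failed for %s", missing)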
Example #44
 def firewall_deleted(self, context, firewall_id, **kwargs):
     """Agent uses this to indicate firewall is deleted."""
     LOG.debug("firewall_deleted() called")
     with context.session.begin(subtransactions=True):
         fw_db = self.plugin._get_firewall(context, firewall_id)
         # allow firewalls in ERROR state to be deleted
         if fw_db.status in (const.PENDING_DELETE, const.ERROR):
             self.plugin.delete_db_firewall_object(context, firewall_id)
             return True
         else:
             LOG.warn(
                 _LW('Firewall %(fw)s unexpectedly deleted by agent, '
                     'status was %(status)s'), {
                         'fw': firewall_id,
                         'status': fw_db.status
                     })
             fw_db.status = const.ERROR
             return False
Example #45
 def optimizer_deleted(self, context, optimizer_id, **kwargs):
     """Agent uses this to indicate optimizer is deleted."""
     LOG.debug("optimizer_deleted() called")
     with context.session.begin(subtransactions=True):
         opt_db = self.plugin._get_optimizer(context, optimizer_id)
         # allow optimizers in ERROR state to be deleted
         if opt_db.status in (const.PENDING_DELETE, const.ERROR):
             self.plugin.delete_db_optimizer_object(context, optimizer_id)
             return True
         else:
             LOG.warn(
                 _LW('Optimizer %(opt)s unexpectedly deleted by agent, '
                     'status was %(status)s'), {
                         'opt': optimizer_id,
                         'status': opt_db.status
                     })
             opt_db.update({"status": const.ERROR})
             return False
Example #46
    def _router_removed(self, router_id):
        ri = self.router_info.get(router_id)
        if ri is None:
            LOG.warn(
                _LW("Info for router %s was not found. "
                    "Performing router cleanup"), router_id)
            self.namespaces_manager.ensure_router_cleanup(router_id)
            return

        registry.notify(resources.ROUTER,
                        events.BEFORE_DELETE,
                        self,
                        router=ri)

        ri.delete(self)
        del self.router_info[router_id]

        registry.notify(resources.ROUTER, events.AFTER_DELETE, self, router=ri)
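The BEFORE_DELETE/AFTER_DELETE notifications go through neutron's callback registry. The toy registry below only illustrates the publish/subscribe idea and is not the real neutron.callbacks API:

import collections

_callbacks = collections.defaultdict(list)


def subscribe(resource, event, callback):
    _callbacks[(resource, event)].append(callback)


def notify(resource, event, trigger, **kwargs):
    for callback in _callbacks[(resource, event)]:
        callback(resource, event, trigger, **kwargs)


# subscribe("router", "before_delete", lambda *args, **kw: print(kw))
# notify("router", "before_delete", None, router="router-1")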
Example #47
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""
        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents('L3', agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = n_ctx.get_admin_context()
        try:
            down_bindings = (context.session.query(RouterL3AgentBinding).join(
                agents_db.Agent).filter(
                    agents_db.Agent.heartbeat_timestamp < cutoff,
                    agents_db.Agent.admin_state_up).outerjoin(
                        l3_attrs_db.RouterExtraAttributes,
                        l3_attrs_db.RouterExtraAttributes.router_id ==
                        RouterL3AgentBinding.router_id).filter(
                            sa.or_(
                                l3_attrs_db.RouterExtraAttributes.ha ==
                                sql.false(),
                                l3_attrs_db.RouterExtraAttributes.ha ==
                                sql.null())))

            for binding in down_bindings:
                LOG.warn(
                    _LW("Rescheduling router %(router)s from agent %(agent)s "
                        "because the agent did not report to the server in "
                        "the last %(dead_time)s seconds."), {
                            'router': binding.router_id,
                            'agent': binding.l3_agent_id,
                            'dead_time': agent_dead_limit
                        })
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed,
                        oslo_messaging.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"),
                                  binding.router_id)
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(
                _LE("Exception encountered during router "
                    "rescheduling."))
Example #48
    def free_vpn_id(self, context, **kwargs):
        router_id = kwargs.get('msg').get('router_id')
        vpn_id = kwargs.get('msg').get('vpn_id')
        if router_id is None or vpn_id is None:
            return
        LOG.info(_LW("free_vpn_id router_id:%s"), router_id)
        with self.vpn_lock:
            if router_id in self.vpn_info:
                vpn_id = self.vpn_info[router_id]
                self.vpn_alloc.append(int(vpn_id))
                del self.vpn_info[router_id]
            else:
                return

        session = self.session
        with session.begin(subtransactions=True):
            (session.query(VpnAllocation).
                     filter_by(vpn_id=vpn_id, router_id=router_id).delete())
        return
Example #49
 def _report_state(self):
     try:
         self.agent_state.get('configurations').update(
             self.cache.get_state())
         ctx = context.get_admin_context_without_session()
         self.state_rpc.report_state(ctx, self.agent_state, self.use_call)
         self.use_call = False
     except AttributeError:
         # This means the server does not support report_state
         LOG.warn(_LW("Neutron server does not support state report."
                      " State report for this agent will be disabled."))
         self.heartbeat.stop()
         self.run()
         return
     except Exception:
         LOG.exception(_LE("Failed reporting state!"))
         return
     if self.agent_state.pop('start_flag', None):
         self.run()
Example #50
    def _parse_vf_link_show(self, vf_line):
        """Parses vf link show command output line.

        @param vf_line: link show vf line
        """
        vf_details = {}
        pattern_match = self.VF_DETAILS_REG_EX.match(vf_line)
        if pattern_match:
            vf_details["vf"] = int(pattern_match.group("vf_index"))
            vf_details["MAC"] = pattern_match.group("mac")
            vf_details["link-state"] = pattern_match.group("state")
        else:
            LOG.warning(
                _LW("failed to parse vf link show line %(line)s: "
                    "for %(device)s"), {
                        'line': vf_line,
                        'device': self.dev_name
                    })
        return vf_details
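The parser relies on a pre-compiled VF_DETAILS_REG_EX with named groups, which is defined elsewhere on the class. The pattern below is only an illustrative guess at an `ip link show` vf line, not the real regex:

import re

# illustrative only -- the real VF_DETAILS_REG_EX is defined on the class
VF_LINE_RE = re.compile(
    r"vf\s+(?P<vf_index>\d+)\s+MAC\s+(?P<mac>[0-9a-fA-F:]+),"
    r".*link-state\s+(?P<state>\w+)")

sample = "vf 0 MAC fa:16:3e:11:22:33, vlan 100, link-state auto"
match = VF_LINE_RE.match(sample)
if match:
    print(int(match.group("vf_index")),
          match.group("mac"), match.group("state"))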
Example #51
    def fetch_element_id(self):
        json_result = self.get_elements()

        if not json_result[0]['result']:
            LOG.warning(_LW("No #{element_type} defined in SMC"))
        else:
            for element in json_result[0]['result']:
                href = element['href']
                self.element_id = int(href.split('/')[-1])
                if element['name'] == self.name:
                    LOG.debug("%(type)s element with name %(name)s FOUND "
                              "%(href)s",
                              {'type': self.element_type,
                               'name': self.name,
                               'href': href})
                    break

        LOG.debug("Got ID %s", self.element_id)
        return self.element_id
Example #52
    def _sync_delete_network_profiles(self, combined_res_info, vsm_ip):
        """
        Sync network profiles by deleting extraneous ones from VSM.

        :param combined_res_info: tuple containing VSM and neutron information
        :param vsm_ip: string representing the IP address of the VSM
        """
        (vsm_net_profile_uuids, neutron_net_profiles) = combined_res_info
        neutron_net_profile_uuids = set(
            self._get_uuids(NETWORK_PROFILES, neutron_net_profiles))
        for np_id in vsm_net_profile_uuids - neutron_net_profile_uuids:
            # delete these network profiles from VSM
            try:
                self.n1kvclient.delete_network_segment_pool(np_id,
                                                            vsm_ip=vsm_ip)
            except n1kv_exc.VSMError as e:
                LOG.warning(
                    _LW('Sync Exception: Network profile deletion on '
                        'VSM failed: %s'), e.message)
Example #53
    def remove_chain(self, name, wrap=True, log_not_found=True):
        """Remove named chain.

        This removal "cascades". All rules in the chain are removed, as are
        all rules in other chains that jump to it.

        If the chain is not found then this is merely logged.

        """
        name = get_chain_name(name, wrap, self.prefix_chain)
        chain_set = self._select_chain_set(wrap)

        if name not in chain_set:
            if log_not_found:
                LOG.warn(
                    _LW('Attempted to remove chain %s '
                        'which does not exist'), name)
            return

        chain_set.remove(name)

        if not wrap:
            # non-wrapped chains and rules need to be dealt with specially,
            # so we keep a list of them to be iterated over in apply()
            self.chains_to_remove.add(name)

            # first, add rules to remove that have a matching chain name
            self.rules_to_remove += [r for r in self.rules if r.chain == name]

        # next, remove rules from list that have a matching chain name
        self.rules = [r for r in self.rules if r.chain != name]

        if not wrap:
            jump_snippet = '-j %s' % name
            # next, add rules to remove that have a matching jump chain
            self.rules_to_remove += [
                r for r in self.rules if jump_snippet in r.rule
            ]
        else:
            jump_snippet = '-j %s-%s' % (self.prefix_chain, name)

        # finally, remove rules from list that have a matching jump chain
        self.rules = [r for r in self.rules if jump_snippet not in r.rule]
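The "cascade" is really two list filters: drop the removed chain's own rules, then drop rules in other chains that jump to it. A toy illustration with a minimal Rule tuple instead of the real iptables rule objects:

import collections

Rule = collections.namedtuple("Rule", "chain rule")

rules = [
    Rule("INPUT", "-j neutron-filter-top"),
    Rule("neutron-filter-top", "-p tcp --dport 22 -j ACCEPT"),
    Rule("FORWARD", "-j OTHER"),
]

name = "neutron-filter-top"
# drop the chain's own rules, then any rule in another chain that jumps to it
rules = [r for r in rules if r.chain != name]
rules = [r for r in rules if ("-j %s" % name) not in r.rule]
# leaves only Rule(chain='FORWARD', rule='-j OTHER')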
Example #54
    def update_port_filter(self, port):
        LOG.debug("OFW Updating device (%s) filter: %s", port['device'], port)
        if port['device'] not in self._filtered_ports:
            LOG.info(
                _('Attempted to update port filter which is not '
                  'filtered %s'), port['device'])
            return

        old_port = self._filtered_ports[port['device']]
        vif_port = self._int_br_not_deferred.get_vif_port_by_id(port['device'])
        if not vif_port:
            LOG.info(
                _LW("Port %(port_id)s not present in bridge. Skip "
                    "applying rules for this port"),
                {'port_id': port['device']})
            return
        port['vinfo'] = self._vif_port_info(vif_port.port_name)
        self._filtered_ports[port['device']] = port
        self._remove_flows(old_port, vif_port)
        self._add_base_flows(port, vif_port)
        self.known_in_port_for_device[port['device']] = vif_port.ofport
Example #55
 def agent_health_check(self):
     """Scan agents and log if some are considered dead."""
     agents = self.get_agents(context.get_admin_context(),
                              filters={'admin_state_up': [True]})
     dead_agents = [agent for agent in agents if not agent['alive']]
     if dead_agents:
         data = '%20s %20s %s\n' % ('Type', 'Last heartbeat', "host")
         data += '\n'.join(['%20s %20s %s' %
                            (agent['agent_type'],
                             agent['heartbeat_timestamp'],
                             agent['host']) for agent in dead_agents])
         LOG.warn(_LW("Agent healthcheck: found %(count)s dead agents "
                      "out of %(total)s:\n%(data)s"),
                  {'count': len(dead_agents),
                   'total': len(agents),
                   'data': data})
     else:
         LOG.debug("Agent healthcheck: found %s active agents",
                   len(agents))
Example #56
    def _get_emb_eswitch(self, device_mac, pci_slot):
        """Get embedded switch.

        Get embedded switch by pci slot and validate pci has device mac
        @param device_mac: device mac
        @param pci_slot: pci slot
        """
        embedded_switch = self.pci_slot_map.get(pci_slot)
        if embedded_switch:
            used_device_mac = embedded_switch.get_pci_device(pci_slot)
            if used_device_mac != device_mac:
                LOG.warning(
                    _LW("device pci mismatch: %(device_mac)s "
                        "- %(pci_slot)s"), {
                            "device_mac": device_mac,
                            "pci_slot": pci_slot
                        })
                embedded_switch = None
        return embedded_switch
Example #57
    def _is_valid_segment(self, segment):
        valid_segment = True
        if segment:
            if (segment[api.NETWORK_TYPE] != p_const.TYPE_VLAN
                    or not self._valid_network_segment(segment)):
                LOG.warn(
                    _LW("Nexus: Segment is an invalid type or not "
                        "supported by this driver. Network type = "
                        "%(network_type)s Physical network = "
                        "%(phy_network)s. Event not processed."), {
                            'network_type': segment[api.NETWORK_TYPE],
                            'phy_network': segment[api.PHYSICAL_NETWORK]
                        })
                valid_segment = False
        else:
            self._log_missing_segment()
            valid_segment = False

        return valid_segment
Example #58
    def restart(self):
        # stop() followed immediately by a start() runs the risk that the
        # current pluto daemon has not had a chance to shutdown. We check
        # the current process information to see if the daemon is still
        # running and if so, wait a short interval and retry.
        self.stop()
        wait_interval = cfg.CONF.libreswan.shutdown_check_timeout
        for i in range(cfg.CONF.libreswan.shutdown_check_retries):
            if not self._process_running():
                self._cleanup_control_files()
                break
            eventlet.sleep(wait_interval)
            wait_interval *= cfg.CONF.libreswan.shutdown_check_back_off
        else:
            LOG.warning(
                _LW('Server appears to still be running, restart '
                    'of router %s may fail'), self.id)

        super(LibreSwanProcess, self).start()
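restart() leans on Python's for/else: the else branch runs only when the loop finished without hitting break, i.e. the daemon never stopped within the allowed retries. A self-contained sketch of that wait-with-back-off loop, with hypothetical parameters:

import logging
import time

LOG = logging.getLogger(__name__)


def wait_for_shutdown(is_running, retries=5, interval=1.0, back_off=1.5):
    """Poll until is_running() returns False, backing off between checks."""
    for _ in range(retries):
        if not is_running():
            break                      # clean shutdown observed
        time.sleep(interval)
        interval *= back_off
    else:
        # for/else: this branch runs only if the loop never hit `break`
        LOG.warning("process still running after %d checks", retries)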
Example #59
 def get_devices_details_list(self, context, devices, agent_id, host=None):
     try:
         cctxt = self.client.prepare(version='1.3')
         res = cctxt.call(context,
                          'get_devices_details_list',
                          devices=devices,
                          agent_id=agent_id,
                          host=host)
     except oslo_messaging.UnsupportedVersion:
         # If the server has not been upgraded yet, a DVR-enabled agent
         # may not work correctly; however, it can function in 'degraded'
         # mode, in that DVR routers may not be in the system yet, and
         # it might not be necessary to retrieve info about the host.
         LOG.warn(_LW('DVR functionality requires a server upgrade.'))
         res = [
             self.get_device_details(context, device, agent_id, host)
             for device in devices
         ]
     return res
Example #60
    def __init__(self):
        # FIXME(jamielennox): A notifier is being created for each Controller
        # and each Notifier is handling its own auth. That means that we are
        # authenticating the exact same thing len(controllers) times. This
        # should be an easy thing to optimize.
        auth = ks_auth.load_from_conf_options(cfg.CONF, 'nova')
        endpoint_override = None

        if not auth:
            LOG.warning(
                _LW('Authenticating to nova using nova_admin_* options'
                    ' is deprecated. This should be done using'
                    ' an auth plugin, like password'))

            if cfg.CONF.nova_admin_tenant_id:
                endpoint_override = "%s/%s" % (cfg.CONF.nova_url,
                                               cfg.CONF.nova_admin_tenant_id)

            auth = DefaultAuthPlugin(
                auth_url=cfg.CONF.nova_admin_auth_url,
                username=cfg.CONF.nova_admin_username,
                password=cfg.CONF.nova_admin_password,
                tenant_id=cfg.CONF.nova_admin_tenant_id,
                tenant_name=cfg.CONF.nova_admin_tenant_name,
                endpoint_override=endpoint_override)

        session = ks_session.Session.load_from_conf_options(cfg.CONF,
                                                            'nova',
                                                            auth=auth)

        # NOTE(andreykurilin): novaclient.v1_1 was renamed to v2 and there is
        # no way to import the contrib module directly without referencing v2,
        # which would only work for novaclient >= 2.21.0.
        novaclient_cls = nova_client.get_client_class(NOVA_API_VERSION)
        server_external_events = importutils.import_module(
            novaclient_cls.__module__.replace(
                ".client", ".contrib.server_external_events"))

        self.nclient = novaclient_cls(session=session,
                                      region_name=cfg.CONF.nova.region_name,
                                      extensions=[server_external_events])
        self.batch_notifier = batch_notifier.BatchNotifier(
            cfg.CONF.send_events_interval, self.send_events)