Example #1
    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE("Required IPC directory does not exist at"
                            " %s") % (ipc_dir, ))
        try:
            self.register(consumption_proxy, consume_in, zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE("Permission denied to IPC directory at"
                            " %s") % (ipc_dir, ))
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Could not create ZeroMQ receiver daemon. "
                        "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
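These snippets use the `_LE`/`_LI`/`_LW` markers from oslo.i18n to mark log messages for translation. For orientation, here is a minimal sketch of how such markers are typically wired up; the domain name is a placeholder, not taken from the examples:

import oslo_i18n

_translators = oslo_i18n.TranslatorFactory(domain='myproject')

_ = _translators.primary        # user-facing messages
_LI = _translators.log_info     # info-level log messages
_LW = _translators.log_warning  # warning-level log messages
_LE = _translators.log_error    # error-level log messages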
Example #2
    def _agent_registration(self):
        """Register this agent with the server.

        This method registers the cfg agent with the neutron server so hosting
        devices can be assigned to it. If the server is not ready to accept
        registration (it returns False), we retry up to
        `MAX_REGISTRATION_ATTEMPTS` times with a delay of
        `REGISTRATION_RETRY_DELAY` seconds between attempts. If the server
        does not respond, or registration still fails after the allowed
        number of attempts, the agent stops itself.
        """
        for attempts in xrange(MAX_REGISTRATION_ATTEMPTS):
            context = n_context.get_admin_context_without_session()
            self.send_agent_report(self.agent_state, context)
            res = self.devmgr_rpc.register_for_duty(context)
            if res is True:
                LOG.info(_LI("[Agent registration] Agent successfully "
                           "registered"))
                return
            elif res is False:
                LOG.warning(_LW("[Agent registration] Neutron server said "
                                "that device manager was not ready. Retrying "
                                "in %0.2f seconds "), REGISTRATION_RETRY_DELAY)
                time.sleep(REGISTRATION_RETRY_DELAY)
            elif res is None:
                LOG.error(_LE("[Agent registration] Neutron server said that "
                              "no device manager was found. Cannot continue. "
                              "Exiting!"))
                raise SystemExit("Cfg Agent exiting")
        LOG.error(_LE("[Agent registration] %d unsuccessful registration "
                    "attempts. Exiting!"), MAX_REGISTRATION_ATTEMPTS)
        raise SystemExit("Cfg Agent exiting")
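The registration loop above is a bounded-retry pattern: try, sleep, and give up after a fixed number of attempts. A stripped-down sketch of the same control flow (names and values here are illustrative, not Neutron's):

import time

MAX_REGISTRATION_ATTEMPTS = 30   # illustrative values
REGISTRATION_RETRY_DELAY = 2.0

def register_with_retry(register):
    """Call register() until it succeeds or the attempts run out."""
    for _attempt in range(MAX_REGISTRATION_ATTEMPTS):
        if register():
            return                            # registered successfully
        time.sleep(REGISTRATION_RETRY_DELAY)  # server not ready; wait
    raise SystemExit("Giving up after %d attempts"
                     % MAX_REGISTRATION_ATTEMPTS)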
Example #3
 def _get_profile_id(cls, p_type, resource, name):
     try:
         tenant_id = manager.NeutronManager.get_service_plugins()[
             constants.L3_ROUTER_NAT].l3_tenant_id()
     except AttributeError:
         return
     if tenant_id is None:
         return
     core_plugin = manager.NeutronManager.get_plugin()
     if p_type == 'net_profile':
         profiles = core_plugin.get_network_profiles(
             n_context.get_admin_context(),
             {'tenant_id': [tenant_id], 'name': [name]},
             ['id'])
     else:
         profiles = core_plugin.get_policy_profiles(
             n_context.get_admin_context(),
             {'tenant_id': [tenant_id], 'name': [name]},
             ['id'])
     if len(profiles) == 1:
         return profiles[0]['id']
     elif len(profiles) > 1:
         # Profile must have a unique name.
          LOG.error(_LE('The %(resource)s %(name)s does not have a unique '
                        'name. Please refer to the admin guide and create '
                        'one.'),
                   {'resource': resource, 'name': name})
     else:
         # Profile has not been created.
          LOG.error(_LE('There is no %(resource)s %(name)s. Please refer to '
                        'the admin guide and create one.'),
                   {'resource': resource, 'name': name})
Example #4
    def create_router(self, host, username, password, rbridge_id, router_id):
        """create vrf and associate vrf."""
        router_id = router_id[0:11]
        vrf_name = template.OS_VRF_NAME.format(id=router_id)
        rd = router_id + ":" + router_id
        try:
            mgr = self.connect(host, username, password)
            self.create_vrf(mgr, rbridge_id, vrf_name)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("NETCONF error"))
                self.close_session()
        try:
            # For Nos5.0.0
            self.configure_rd_for_vrf(mgr, rbridge_id, vrf_name, rd)
            self.configure_address_family_for_vrf(mgr, rbridge_id, vrf_name)
        except Exception:
            with excutils.save_and_reraise_exception() as ctxt:
                try:
                    # This is done because on 4.0.0 the rd accepts neither
                    # alphabetic characters nor hyphens
                    rd = "".join(i for i in router_id if i in "0123456789")
                    rd = rd[:4] + ":" + rd[:4]
                    self.configure_rd_for_vrf(mgr, rbridge_id, vrf_name, rd)
                    self.configure_address_family_for_vrf_v1(mgr,
                                                             rbridge_id,
                                                             vrf_name)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_LE("NETCONF error"))
                        self.close_session()

                ctxt.reraise = False
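Several of these examples lean on `excutils.save_and_reraise_exception()` from oslo.utils: it captures the in-flight exception and re-raises it when the `with` block exits, unless `reraise` is set to False, which is how the snippet above suppresses the original error once the 4.0.0 fallback succeeds. A minimal sketch of that pattern, assuming oslo.utils is available:

from oslo_utils import excutils

def call_with_fallback(primary, fallback):
    """Run primary(); if it fails, try fallback() and suppress the
    original exception only when the fallback succeeds."""
    try:
        return primary()
    except Exception:
        with excutils.save_and_reraise_exception() as ctxt:
            result = fallback()
            ctxt.reraise = False  # fallback worked; drop the saved error
        return result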
Example #5
    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Required IPC directory does not exist at"
                                  " %s") % (ipc_dir, ))
        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Permission denied to IPC directory at"
                                  " %s") % (ipc_dir, ))
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Could not create ZeroMQ receiver daemon. "
                              "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
Example #6
 def mgmt_nw_id(cls):
     """Returns id of the management network."""
     if cls._mgmt_nw_uuid is None:
         tenant_id = cls.l3_tenant_id()
         if not tenant_id:
             return
         net = manager.NeutronManager.get_plugin().get_networks(
             neutron_context.get_admin_context(),
             {'tenant_id': [tenant_id],
              'name': [cfg.CONF.general.management_network]},
             ['id', 'subnets'])
         if len(net) == 1:
             num_subnets = len(net[0]['subnets'])
             if num_subnets == 0:
                 LOG.error(_LE('The virtual management network has no '
                             'subnet. Please assign one.'))
                 return
             elif num_subnets > 1:
                 LOG.info(_LI('The virtual management network has %d '
                            'subnets. The first one will be used.'),
                          num_subnets)
             cls._mgmt_nw_uuid = net[0].get('id')
         elif len(net) > 1:
             # Management network must have a unique name.
              LOG.error(_LE('The virtual management network does not have '
                            'a unique name. Please ensure that it does.'))
         else:
             # Management network has not been created.
             LOG.error(_LE('There is no virtual management network. Please '
                         'create one.'))
     return cls._mgmt_nw_uuid
Example #7
 def mgmt_sec_grp_id(cls):
     """Returns id of security group used by the management network."""
     if not utils.is_extension_supported(
             manager.NeutronManager.get_plugin(), "security-group"):
         return
     if cls._mgmt_sec_grp_id is None:
         # Get the id for the _mgmt_security_group_id
         tenant_id = cls.l3_tenant_id()
         res = manager.NeutronManager.get_plugin().get_security_groups(
             neutron_context.get_admin_context(),
             {'tenant_id': [tenant_id],
              'name': [cfg.CONF.general.default_security_group]},
             ['id'])
         if len(res) == 1:
             cls._mgmt_sec_grp_id = res[0].get('id')
         elif len(res) > 1:
             # the mgmt sec group must be unique.
              LOG.error(_LE('The security group for the virtual management '
                            'network does not have a unique name. Please '
                            'ensure that it does.'))
         else:
             # CSR Mgmt security group is not present.
              LOG.error(_LE('There is no security group for the virtual '
                            'management network. Please create one.'))
     return cls._mgmt_sec_grp_id
Example #8
    def _callback_handler(self, message, callback):
        """Call callback with deserialized message.

        Messages that are processed without exception are ack'ed.

        If the message processing generates an exception, it will be
        ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed.
        """

        try:
            msg = rpc_common.deserialize_msg(message.payload)
            callback(msg)
        except Exception:
            if self.ack_on_error:
                LOG.exception(
                    _LE("Failed to process message"
                        " ... skipping it."))
                message.ack()
            else:
                LOG.exception(
                    _LE("Failed to process message"
                        " ... will requeue."))
                message.requeue()
        else:
            message.ack()
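The ack/requeue branch above is the usual poison-message trade-off: acking on error drops the message for good, while requeueing hands it to another consumer (and can loop forever on a payload that always fails). A self-contained sketch of the same branching, with the broker message stubbed out:

class StubMessage(object):
    """Stand-in for an AMQP message exposing ack()/requeue()."""
    def ack(self):
        print('acked')

    def requeue(self):
        print('requeued')

def handle(message, payload, callback, ack_on_error=True):
    try:
        callback(payload)
    except Exception:
        if ack_on_error:
            message.ack()      # drop the poisonous message
        else:
            message.requeue()  # let another consumer retry it
    else:
        message.ack()          # processed cleanly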
Example #9
    def ensure_physical_in_bridge(self, network_id, network_type,
                                  physical_network, segmentation_id):
        if network_type == p_const.TYPE_VXLAN:
            if self.vxlan_mode == lconst.VXLAN_NONE:
                LOG.error(_LE("Unable to add vxlan interface for network %s"),
                          network_id)
                return
            return self.ensure_vxlan_bridge(network_id, segmentation_id)

        physical_interface = self.interface_mappings.get(physical_network)
        if not physical_interface:
            LOG.error(_LE("No mapping for physical network %s"),
                      physical_network)
            return
        if network_type == p_const.TYPE_FLAT:
            return self.ensure_flat_bridge(network_id, physical_interface)
        elif network_type == p_const.TYPE_VLAN:
            return self.ensure_vlan_bridge(network_id, physical_interface,
                                           segmentation_id)
        else:
            LOG.error(
                _LE("Unknown network_type %(network_type)s for network "
                    "%(network_id)s."), {
                        'network_type': network_type,
                        'network_id': network_id
                    })
Example #10
def get_active_routers_for_host(context, host):
    '''Get list of routers from the INI file that use the requested host.'''
    routers = []
    configured_routers = get_available_csrs_from_config(cfg.CONF.config_file)
    if not configured_routers:
        LOG.error(_LE("No routers found in INI file!"))
        return routers
    for router_ip, info in configured_routers.items():
        if host == info['host']:
            router_id = _get_router_id_via_external_ip(context, router_ip)
            if router_id:
                LOG.debug("Found router %(router)s on host %(host)s", {
                    'router': router_id,
                    'host': host
                })
                routers.append({
                    'id': router_id,
                    'hosting_device': {
                        'management_ip_address': info['rest_mgmt_ip'],
                        'credentials': {
                            'username': info['username'],
                            'password': info['password']
                        }
                    },
                    'tunnel_if': info['tunnel_if'],
                    'tunnel_ip': info['tunnel_ip']
                })
            else:
                LOG.error(
                    _LE("Unable to lookup router ID based on router's "
                        "public IP (%s) in INI file"), router_ip)
    if not routers:
        LOG.error(_LE("No matching routers on host %s"), host)
    return routers
Example #11
    def sync_state(self, networks=None):
        """Sync the local DHCP state with Neutron. If no networks are passed,
        or 'None' is one of the networks, sync all of the networks.
        """
        only_nets = set([] if (not networks or None in networks) else networks)
        LOG.info(_LI('Synchronizing state'))
        pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
        known_network_ids = set(self.cache.get_network_ids())

        try:
            active_networks = self.plugin_rpc.get_active_networks_info()
            active_network_ids = set(network.id for network in active_networks)
            for deleted_id in known_network_ids - active_network_ids:
                try:
                    self.disable_dhcp_helper(deleted_id)
                except Exception as e:
                    self.schedule_resync(e, deleted_id)
                    LOG.exception(
                        _LE('Unable to sync network state on '
                            'deleted network %s'), deleted_id)

            for network in active_networks:
                if (not only_nets or  # specifically resync all
                        network.id not in known_network_ids or  # missing net
                        network.id in only_nets):  # specific network to sync
                    pool.spawn(self.safe_configure_dhcp_for_network, network)
            pool.waitall()
            LOG.info(_LI('Synchronizing state complete'))

        except Exception as e:
            self.schedule_resync(e)
            LOG.exception(_LE('Unable to sync network state.'))
Example #12
def get_active_routers_for_host(context, host):
    '''Get list of routers from the INI file that use the requested host.'''
    routers = []
    configured_routers = get_available_csrs_from_config(cfg.CONF.config_file)
    if not configured_routers:
        LOG.error(_LE("No routers found in INI file!"))
        return routers
    for router_ip, info in configured_routers.items():
        if host == info['host']:
            router_id = _get_router_id_via_external_ip(context, router_ip)
            if router_id:
                LOG.debug("Found router %(router)s on host %(host)s",
                          {'router': router_id, 'host': host})
                routers.append({
                    'id': router_id,
                    'hosting_device': {
                        'management_ip_address': info['rest_mgmt_ip'],
                        'credentials': {'username': info['username'],
                                        'password': info['password']}
                    },
                    'tunnel_if': info['tunnel_if'],
                    'tunnel_ip': info['tunnel_ip']
                })
            else:
                LOG.error(_LE("Unable to lookup router ID based on router's "
                              "public IP (%s) in INI file"), router_ip)
    if not routers:
        LOG.error(_LE("No matching routers on host %s"), host)
    return routers
Example #13
    def sync_state(self, networks=None):
        """Sync the local DHCP state with Neutron. If no networks are passed,
        or 'None' is one of the networks, sync all of the networks.
        """
        only_nets = set([] if (not networks or None in networks) else networks)
        LOG.info(_LI('Synchronizing state'))
        pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
        known_network_ids = set(self.cache.get_network_ids())

        try:
            active_networks = self.plugin_rpc.get_active_networks_info()
            active_network_ids = set(network.id for network in active_networks)
            for deleted_id in known_network_ids - active_network_ids:
                try:
                    self.disable_dhcp_helper(deleted_id)
                except Exception as e:
                    self.schedule_resync(e, deleted_id)
                    LOG.exception(_LE('Unable to sync network state on '
                                      'deleted network %s'), deleted_id)

            for network in active_networks:
                if (not only_nets or  # specifically resync all
                        network.id not in known_network_ids or  # missing net
                        network.id in only_nets):  # specific network to sync
                    pool.spawn(self.safe_configure_dhcp_for_network, network)
            pool.waitall()
            LOG.info(_LI('Synchronizing state complete'))

        except Exception as e:
            self.schedule_resync(e)
            LOG.exception(_LE('Unable to sync network state.'))
Example #14
    def provision_local_vlan(self, net_uuid, network_type, physical_network,
                             segmentation_id):
        """Provisions a local VLAN.

        :param net_uuid: the uuid of the network associated with this vlan.
        :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat',
                                               'local')
        :param physical_network: the physical network for 'vlan' or 'flat'
        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
        """

        if not self.available_local_vlans:
            LOG.error(_LE("No local VLAN available for net-id=%s"), net_uuid)
            return
        lvid = self.available_local_vlans.pop()
        LOG.info(
            _LI("Assigning %(vlan_id)s as local vlan for "
                "net-id=%(net_uuid)s"), {
                    'vlan_id': lvid,
                    'net_uuid': net_uuid
                })
        self.local_vlan_map[net_uuid] = LocalVLANMapping(
            lvid, network_type, physical_network, segmentation_id)

        if network_type in constants.TUNNEL_NETWORK_TYPES:
            if self.enable_tunneling:
                self.int_br.provision_tenant_tunnel(network_type, lvid,
                                                    segmentation_id)
            else:
                LOG.error(
                    _LE("Cannot provision %(network_type)s network for "
                        "net-id=%(net_uuid)s - tunneling disabled"), {
                            'network_type': network_type,
                            'net_uuid': net_uuid
                        })
        elif network_type in [p_const.TYPE_VLAN, p_const.TYPE_FLAT]:
            if physical_network in self.int_ofports:
                phys_port = self.int_ofports[physical_network]
                self.int_br.provision_tenant_physnet(network_type, lvid,
                                                     segmentation_id,
                                                     phys_port)
            else:
                LOG.error(
                    _LE("Cannot provision %(network_type)s network for "
                        "net-id=%(net_uuid)s - no bridge for "
                        "physical_network %(physical_network)s"), {
                            'network_type': network_type,
                            'net_uuid': net_uuid,
                            'physical_network': physical_network
                        })
        elif network_type == p_const.TYPE_LOCAL:
            # no flows needed for local networks
            pass
        else:
            LOG.error(
                _LE("Cannot provision unknown network type "
                    "%(network_type)s for net-id=%(net_uuid)s"), {
                        'network_type': network_type,
                        'net_uuid': net_uuid
                    })
Example #15
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""

        # give agents extra time to handle transient failures
        agent_dead_limit = cfg.CONF.agent_down_time * 2

        # check for an abrupt clock change since last check. if a change is
        # detected, sleep for a while to let the agents check in.
        tdelta = timeutils.utcnow() - getattr(self, "_clock_jump_canary", timeutils.utcnow())
        if timeutils.total_seconds(tdelta) > cfg.CONF.agent_down_time:
            LOG.warn(
                _LW(
                    "Time since last L3 agent reschedule check has "
                    "exceeded the interval between checks. Waiting "
                    "before check to allow agents to send a heartbeat "
                    "in case there was a clock adjustment."
                )
            )
            time.sleep(agent_dead_limit)
        self._clock_jump_canary = timeutils.utcnow()

        context = n_ctx.get_admin_context()
        cutoff = timeutils.utcnow() - datetime.timedelta(seconds=agent_dead_limit)
        down_bindings = (
            context.session.query(RouterL3AgentBinding)
            .join(agents_db.Agent)
            .filter(agents_db.Agent.heartbeat_timestamp < cutoff, agents_db.Agent.admin_state_up)
            .outerjoin(
                l3_attrs_db.RouterExtraAttributes,
                l3_attrs_db.RouterExtraAttributes.router_id == RouterL3AgentBinding.router_id,
            )
            .filter(
                sa.or_(
                    l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                    l3_attrs_db.RouterExtraAttributes.ha == sql.null(),
                )
            )
        )
        try:
            for binding in down_bindings:
                LOG.warn(
                    _LW(
                        "Rescheduling router %(router)s from agent %(agent)s "
                        "because the agent did not report to the server in "
                        "the last %(dead_time)s seconds."
                    ),
                    {"router": binding.router_id, "agent": binding.l3_agent_id, "dead_time": agent_dead_limit},
                )
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed, n_rpc.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"), binding.router_id)
        except db_exc.DBError:
            # Catch DB errors here so a transient DB connectivity issue
            # doesn't stop the loopingcall.
            LOG.exception(_LE("Exception encountered during router " "rescheduling."))
Example #16
    def _call(self, action, resource, data, headers, binary=False):
        if resource.startswith('http'):
            uri = resource
        else:
            uri = self.base_uri + resource
        if binary:
            body = data
        else:
            body = jsonutils.dumps(data)

        debug_data = 'binary' if binary else body
        debug_data = debug_data if debug_data else 'EMPTY'
        if not headers:
            headers = {'Authorization': 'Basic %s' % self.auth}
        else:
            headers['Authorization'] = 'Basic %s' % self.auth
        conn = None
        if self.ssl:
            conn = httplib.HTTPSConnection(self.server,
                                           self.port,
                                           timeout=self.timeout)
            if conn is None:
                LOG.error(
                    _LE('vdirectRESTClient: Could not establish HTTPS '
                        'connection'))
                return 0, None, None, None
        else:
            conn = httplib.HTTPConnection(self.server,
                                          self.port,
                                          timeout=self.timeout)
            if conn is None:
                LOG.error(
                    _LE('vdirectRESTClient: Could not establish HTTP '
                        'connection'))
                return 0, None, None, None

        try:
            conn.request(action, uri, body, headers)
            response = conn.getresponse()
            respstr = response.read()
            respdata = respstr
            try:
                respdata = jsonutils.loads(respstr)
            except ValueError:
                # response was not JSON, ignore the exception
                pass
            ret = (response.status, response.reason, respstr, respdata)
        except Exception as e:
            log_dict = {'action': action, 'e': e}
            LOG.error(_LE('vdirectRESTClient: %(action)s failure, %(e)r'),
                      log_dict)
            ret = -1, None, None, None
        conn.close()
        return ret
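Two notes on the snippet above: it is Python 2 code (`httplib`), and the `if conn is None` checks are effectively dead, since the connection constructors always return an instance (connection errors only surface later, on request). A rough Python 3 counterpart of the connection setup, for orientation only:

import http.client

def open_connection(server, port, use_ssl, timeout):
    """Roughly equivalent connection setup under Python 3."""
    cls = (http.client.HTTPSConnection if use_ssl
           else http.client.HTTPConnection)
    return cls(server, port, timeout=timeout)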
Example #17
    def extend_resources(self, version, attr_map):
        """Extend resources with additional resources or attributes.

        :param attr_map: the existing mapping from resource name to
        attrs definition.

        After this function returns, attr_map will have been extended by
        any extension that wishes to extend it.
        """
        update_exts = []
        processed_exts = set()
        exts_to_process = self.extensions.copy()
        # Iterate while there are unprocessed extensions, stopping once a
        # whole pass makes no progress
        while exts_to_process:
            processed_ext_count = len(processed_exts)
            for ext_name, ext in exts_to_process.items():
                if not hasattr(ext, 'get_extended_resources'):
                    del exts_to_process[ext_name]
                    continue
                if hasattr(ext, 'update_attributes_map'):
                    update_exts.append(ext)
                if hasattr(ext, 'get_required_extensions'):
                    # Process extension only if all required extensions
                    # have been processed already
                    required_exts_set = set(ext.get_required_extensions())
                    if required_exts_set - processed_exts:
                        continue
                try:
                    extended_attrs = ext.get_extended_resources(version)
                    for resource, resource_attrs in extended_attrs.iteritems():
                        if attr_map.get(resource, None):
                            attr_map[resource].update(resource_attrs)
                        else:
                            attr_map[resource] = resource_attrs
                except AttributeError:
                    LOG.exception(
                        _LE("Error fetching extended attributes for "
                            "extension '%s'"), ext.get_name())
                processed_exts.add(ext_name)
                del exts_to_process[ext_name]
            if len(processed_exts) == processed_ext_count:
                # Exit loop as no progress was made
                break
        if exts_to_process:
            # NOTE(salv-orlando): Consider whether this error should be fatal
            LOG.error(
                _LE("It was impossible to process the following "
                    "extensions: %s because of missing requirements."),
                ','.join(exts_to_process.keys()))

        # Extending extensions' attributes map.
        for ext in update_exts:
            ext.update_attributes_map(attr_map)
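extend_resources is a fixed-point loop: keep sweeping the pending extensions, processing any whose requirements have already been met, and stop when a full sweep makes no progress (a cycle or a missing requirement). The same shape distilled to plain data, as an illustration:

def process_with_requirements(pending, requires):
    """pending: dict of name -> payload; requires: dict of name -> set
    of prerequisite names. Returns (processed order, unprocessable)."""
    done = []
    while pending:
        progressed = False
        for name in list(pending):
            if requires.get(name, set()) - set(done):
                continue          # prerequisites not satisfied yet
            done.append(name)
            del pending[name]
            progressed = True
        if not progressed:
            break                 # cycle or missing requirement
    return done, list(pending)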
Example #18
    def _bind_centralized_snat_port_on_dvr_subnet(self, port, fixed_ips,
                                                  device_owner, local_vlan):
        if port.vif_id in self.local_ports:
            # throw an error if CSNAT port is already on a different
            # dvr routed subnet
            ovsport = self.local_ports[port.vif_id]
            subs = list(ovsport.get_subnets())
            LOG.error(_LE("Centralized-SNAT port %s already seen on "),
                      port.vif_id)
            LOG.error(_LE("a different subnet %s"), subs[0])
            return
        # since centralized-SNAT (CSNAT) port must have only one fixed
        # IP, directly use fixed_ips[0]
        subnet_uuid = fixed_ips[0]['subnet_id']
        ldm = None
        subnet_info = None
        if subnet_uuid not in self.local_dvr_map:
            # no csnat ports seen on this subnet - create csnat state
            # for this subnet
            subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context,
                                                             subnet_uuid)
            ldm = LocalDVRSubnetMapping(subnet_info, port.ofport)
            self.local_dvr_map[subnet_uuid] = ldm
        else:
            ldm = self.local_dvr_map[subnet_uuid]
            subnet_info = ldm.get_subnet_info()
            # Store csnat OF Port in the existing DVRSubnetMap
            ldm.set_csnat_ofport(port.ofport)

        # create ovsPort footprint for csnat port
        ovsport = OVSPort(port.vif_id, port.ofport,
                          port.vif_mac, device_owner)
        ovsport.add_subnet(subnet_uuid)
        self.local_ports[port.vif_id] = ovsport

        self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
                             priority=4,
                             dl_vlan=local_vlan,
                             dl_dst=ovsport.get_mac(),
                             actions="strip_vlan,mod_dl_src:%s,"
                             " output:%s" %
                             (subnet_info['gateway_mac'],
                              ovsport.get_ofport()))
        ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
        ofports = str(ldm.get_csnat_ofport()) + ',' + ofports
        ip_subnet = subnet_info['cidr']
        self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
                             priority=2,
                             proto='ip',
                             dl_vlan=local_vlan,
                             nw_dst=ip_subnet,
                             actions="strip_vlan,mod_dl_src:%s,"
                             " output:%s" %
                             (subnet_info['gateway_mac'], ofports))
Example #19
 def _invoke_driver(self, context, meterings, func_name):
     try:
         return getattr(self.metering_driver, func_name)(context, meterings)
     except AttributeError:
         LOG.exception(_LE("Driver %(driver)s does not implement %(func)s"),
                       {'driver': self.conf.driver,
                        'func': func_name})
     except RuntimeError:
         LOG.exception(_LE("Driver %(driver)s:%(func)s runtime error"),
                       {'driver': self.conf.driver,
                        'func': func_name})
Example #20
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""

        # give agents extra time to handle transient failures
        agent_dead_limit = cfg.CONF.agent_down_time * 2

        # check for an abrupt clock change since last check. if a change is
        # detected, sleep for a while to let the agents check in.
        tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary',
                                              timeutils.utcnow())
        if timeutils.total_seconds(tdelta) > cfg.CONF.agent_down_time:
            LOG.warn(_LW("Time since last L3 agent reschedule check has "
                         "exceeded the interval between checks. Waiting "
                         "before check to allow agents to send a heartbeat "
                         "in case there was a clock adjustment."))
            time.sleep(agent_dead_limit)
        self._clock_jump_canary = timeutils.utcnow()

        context = n_ctx.get_admin_context()
        cutoff = timeutils.utcnow() - datetime.timedelta(
            seconds=agent_dead_limit)
        down_bindings = (
            context.session.query(RouterL3AgentBinding).
            join(agents_db.Agent).
            filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                   agents_db.Agent.admin_state_up).
            outerjoin(l3_attrs_db.RouterExtraAttributes,
                      l3_attrs_db.RouterExtraAttributes.router_id ==
                      RouterL3AgentBinding.router_id).
            filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                          l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
        try:
            for binding in down_bindings:
                LOG.warn(_LW(
                    "Rescheduling router %(router)s from agent %(agent)s "
                    "because the agent did not report to the server in "
                    "the last %(dead_time)s seconds."),
                    {'router': binding.router_id,
                     'agent': binding.l3_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed,
                        messaging.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"),
                                  binding.router_id)
        except db_exc.DBError:
            # Catch DB errors here so a transient DB connectivity issue
            # doesn't stop the loopingcall.
            LOG.exception(_LE("Exception encountered during router "
                              "rescheduling."))
Example #21
    def _bind_centralized_snat_port_on_dvr_subnet(self, port, fixed_ips,
                                                  device_owner, local_vlan):
        if port.vif_id in self.local_ports:
            # throw an error if CSNAT port is already on a different
            # dvr routed subnet
            ovsport = self.local_ports[port.vif_id]
            subs = list(ovsport.get_subnets())
            LOG.error(_LE("Centralized-SNAT port %s already seen on "),
                      port.vif_id)
            LOG.error(_LE("a different subnet %s"), subs[0])
            return
        # since centralized-SNAT (CSNAT) port must have only one fixed
        # IP, directly use fixed_ips[0]
        subnet_uuid = fixed_ips[0]['subnet_id']
        ldm = None
        subnet_info = None
        if subnet_uuid not in self.local_dvr_map:
            # no csnat ports seen on this subnet - create csnat state
            # for this subnet
            subnet_info = self.plugin_rpc.get_subnet_for_dvr(
                self.context, subnet_uuid)
            ldm = LocalDVRSubnetMapping(subnet_info, port.ofport)
            self.local_dvr_map[subnet_uuid] = ldm
        else:
            ldm = self.local_dvr_map[subnet_uuid]
            subnet_info = ldm.get_subnet_info()
            # Store csnat OF Port in the existing DVRSubnetMap
            ldm.set_csnat_ofport(port.ofport)

        # create ovsPort footprint for csnat port
        ovsport = OVSPort(port.vif_id, port.ofport, port.vif_mac, device_owner)
        ovsport.add_subnet(subnet_uuid)
        self.local_ports[port.vif_id] = ovsport

        self.int_br.add_flow(
            table=constants.DVR_TO_SRC_MAC,
            priority=4,
            dl_vlan=local_vlan,
            dl_dst=ovsport.get_mac(),
            actions="strip_vlan,mod_dl_src:%s,"
            " output:%s" % (subnet_info['gateway_mac'], ovsport.get_ofport()))
        ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
        ofports = str(ldm.get_csnat_ofport()) + ',' + ofports
        ip_subnet = subnet_info['cidr']
        self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
                             priority=2,
                             proto='ip',
                             dl_vlan=local_vlan,
                             nw_dst=ip_subnet,
                             actions="strip_vlan,mod_dl_src:%s,"
                             " output:%s" %
                             (subnet_info['gateway_mac'], ofports))
Example #22
    def provision_local_vlan(self, net_uuid, network_type, physical_network,
                             segmentation_id):
        """Provisions a local VLAN.

        :param net_uuid: the uuid of the network associated with this vlan.
        :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat',
                                               'local')
        :param physical_network: the physical network for 'vlan' or 'flat'
        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
        """

        if not self.available_local_vlans:
            LOG.error(_LE("No local VLAN available for net-id=%s"), net_uuid)
            return
        lvid = self.available_local_vlans.pop()
        LOG.info(_LI("Assigning %(vlan_id)s as local vlan for "
                     "net-id=%(net_uuid)s"),
                 {'vlan_id': lvid, 'net_uuid': net_uuid})
        self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid, network_type,
                                                         physical_network,
                                                         segmentation_id)

        if network_type in constants.TUNNEL_NETWORK_TYPES:
            if self.enable_tunneling:
                self.int_br.provision_tenant_tunnel(network_type, lvid,
                                                    segmentation_id)
            else:
                LOG.error(_LE("Cannot provision %(network_type)s network for "
                              "net-id=%(net_uuid)s - tunneling disabled"),
                          {'network_type': network_type,
                           'net_uuid': net_uuid})
        elif network_type in [p_const.TYPE_VLAN, p_const.TYPE_FLAT]:
            if physical_network in self.int_ofports:
                phys_port = self.int_ofports[physical_network]
                self.int_br.provision_tenant_physnet(network_type, lvid,
                                                     segmentation_id,
                                                     phys_port)
            else:
                LOG.error(_LE("Cannot provision %(network_type)s network for "
                              "net-id=%(net_uuid)s - no bridge for "
                              "physical_network %(physical_network)s"),
                          {'network_type': network_type,
                           'net_uuid': net_uuid,
                           'physical_network': physical_network})
        elif network_type == p_const.TYPE_LOCAL:
            # no flows needed for local networks
            pass
        else:
            LOG.error(_LE("Cannot provision unknown network type "
                          "%(network_type)s for net-id=%(net_uuid)s"),
                      {'network_type': network_type,
                       'net_uuid': net_uuid})
Example #23
    def extend_resources(self, version, attr_map):
        """Extend resources with additional resources or attributes.

        :param attr_map: the existing mapping from resource name to
        attrs definition.

        After this function returns, attr_map will have been extended by
        any extension that wishes to extend it.
        """
        update_exts = []
        processed_exts = set()
        exts_to_process = self.extensions.copy()
        # Iterate while there are unprocessed extensions, stopping once a
        # whole pass makes no progress
        while exts_to_process:
            processed_ext_count = len(processed_exts)
            for ext_name, ext in exts_to_process.items():
                if not hasattr(ext, 'get_extended_resources'):
                    del exts_to_process[ext_name]
                    continue
                if hasattr(ext, 'update_attributes_map'):
                    update_exts.append(ext)
                if hasattr(ext, 'get_required_extensions'):
                    # Process extension only if all required extensions
                    # have been processed already
                    required_exts_set = set(ext.get_required_extensions())
                    if required_exts_set - processed_exts:
                        continue
                try:
                    extended_attrs = ext.get_extended_resources(version)
                    for resource, resource_attrs in extended_attrs.iteritems():
                        if attr_map.get(resource, None):
                            attr_map[resource].update(resource_attrs)
                        else:
                            attr_map[resource] = resource_attrs
                except AttributeError:
                    LOG.exception(_LE("Error fetching extended attributes for "
                                      "extension '%s'"), ext.get_name())
                processed_exts.add(ext_name)
                del exts_to_process[ext_name]
            if len(processed_exts) == processed_ext_count:
                # Exit loop as no progress was made
                break
        if exts_to_process:
            # NOTE(salv-orlando): Consider whether this error should be fatal
            LOG.error(_LE("It was impossible to process the following "
                          "extensions: %s because of missing requirements."),
                      ','.join(exts_to_process.keys()))

        # Extending extensions' attributes map.
        for ext in update_exts:
            ext.update_attributes_map(attr_map)
Example #24
 def _invoke_driver_for_plugin_api(self, context, fw, func_name):
     """Invoke driver method for plugin API and provide status back."""
     LOG.debug("%(func_name)s from agent for fw: %(fwid)s", {
         'func_name': func_name,
         'fwid': fw['id']
     })
     try:
         routers = self.plugin_rpc.get_routers(context)
         router_info_list = self._get_router_info_list_for_tenant(
             routers, fw['tenant_id'])
         if not router_info_list:
             LOG.debug('No Routers on tenant: %s', fw['tenant_id'])
             # fw was created before any routers were added, and if a
             # delete is sent then we need to ack so that plugin can
             # cleanup.
             if func_name == 'delete_firewall':
                 self.fwplugin_rpc.firewall_deleted(context, fw['id'])
             return
         LOG.debug("Apply fw on Router List: '%s'",
                   [ri.router['id'] for ri in router_info_list])
         # call into the driver
         try:
             self.fwaas_driver.__getattribute__(func_name)(
                 self.conf.agent_mode, router_info_list, fw)
             if fw['admin_state_up']:
                 status = constants.ACTIVE
             else:
                 status = constants.DOWN
         except fw_ext.FirewallInternalDriverError:
             LOG.error(
                 _LE("Firewall Driver Error for %(func_name)s "
                     "for fw: %(fwid)s"), {
                         'func_name': func_name,
                         'fwid': fw['id']
                     })
             status = constants.ERROR
         # delete needs different handling
         if func_name == 'delete_firewall':
             if status in [constants.ACTIVE, constants.DOWN]:
                 self.fwplugin_rpc.firewall_deleted(context, fw['id'])
         else:
             self.fwplugin_rpc.set_firewall_status(context, fw['id'],
                                                   status)
     except Exception:
         LOG.exception(
             _LE("FWaaS RPC failure in %(func_name)s for fw: %(fwid)s"), {
                 'func_name': func_name,
                 'fwid': fw['id']
             })
         self.services_sync = True
     return
Example #25
    def _call(self, action, resource, data, headers, binary=False):
        if resource.startswith('http'):
            uri = resource
        else:
            uri = self.base_uri + resource
        if binary:
            body = data
        else:
            body = jsonutils.dumps(data)

        debug_data = 'binary' if binary else body
        debug_data = debug_data if debug_data else 'EMPTY'
        if not headers:
            headers = {'Authorization': 'Basic %s' % self.auth}
        else:
            headers['Authorization'] = 'Basic %s' % self.auth
        conn = None
        if self.ssl:
            conn = httplib.HTTPSConnection(
                self.server, self.port, timeout=self.timeout)
            if conn is None:
                LOG.error(_LE('vdirectRESTClient: Could not establish HTTPS '
                          'connection'))
                return 0, None, None, None
        else:
            conn = httplib.HTTPConnection(
                self.server, self.port, timeout=self.timeout)
            if conn is None:
                LOG.error(_LE('vdirectRESTClient: Could not establish HTTP '
                          'connection'))
                return 0, None, None, None

        try:
            conn.request(action, uri, body, headers)
            response = conn.getresponse()
            respstr = response.read()
            respdata = respstr
            try:
                respdata = jsonutils.loads(respstr)
            except ValueError:
                # response was not JSON, ignore the exception
                pass
            ret = (response.status, response.reason, respstr, respdata)
        except Exception as e:
            log_dict = {'action': action, 'e': e}
            LOG.error(_LE('vdirectRESTClient: %(action)s failure, %(e)r'),
                      log_dict)
            ret = -1, None, None, None
        conn.close()
        return ret
Example #26
 def _invoke_driver_for_plugin_api(self, context, fw, func_name):
     """Invoke driver method for plugin API and provide status back."""
     LOG.debug("%(func_name)s from agent for fw: %(fwid)s",
               {'func_name': func_name, 'fwid': fw['id']})
     try:
         routers = self.plugin_rpc.get_routers(context)
         router_info_list = self._get_router_info_list_for_tenant(
             routers,
             fw['tenant_id'])
         if not router_info_list:
             LOG.debug('No Routers on tenant: %s', fw['tenant_id'])
             # fw was created before any routers were added, and if a
             # delete is sent then we need to ack so that plugin can
             # cleanup.
             if func_name == 'delete_firewall':
                 self.fwplugin_rpc.firewall_deleted(context, fw['id'])
             return
         LOG.debug("Apply fw on Router List: '%s'",
                   [ri.router['id'] for ri in router_info_list])
         # call into the driver
         try:
             self.fwaas_driver.__getattribute__(func_name)(
                 self.conf.agent_mode,
                 router_info_list,
                 fw)
             if fw['admin_state_up']:
                 status = constants.ACTIVE
             else:
                 status = constants.DOWN
         except fw_ext.FirewallInternalDriverError:
             LOG.error(_LE("Firewall Driver Error for %(func_name)s "
                           "for fw: %(fwid)s"),
                       {'func_name': func_name, 'fwid': fw['id']})
             status = constants.ERROR
         # delete needs different handling
         if func_name == 'delete_firewall':
             if status in [constants.ACTIVE, constants.DOWN]:
                 self.fwplugin_rpc.firewall_deleted(context, fw['id'])
         else:
             self.fwplugin_rpc.set_firewall_status(
                 context,
                 fw['id'],
                 status)
     except Exception:
         LOG.exception(
             _LE("FWaaS RPC failure in %(func_name)s for fw: %(fwid)s"),
             {'func_name': func_name, 'fwid': fw['id']})
         self.services_sync = True
     return
Example #27
 def _set_ha_HSRP(self, subinterface, vrf_name, priority, group, ip):
     if vrf_name not in self._get_vrfs():
         LOG.error(_LE("VRF %s not present"), vrf_name)
     confstr = snippets.SET_INTC_HSRP % (subinterface, vrf_name, group,
                                         priority, group, ip)
     action = "SET_INTC_HSRP (Group: %s, Priority: % s)" % (group, priority)
     self._edit_running_config(confstr, action)
Example #28
def check_ebtables():
    result = checks.ebtables_supported()
    if not result:
        LOG.error(
            _LE('Cannot run ebtables. Please ensure that it '
                'is installed.'))
    return result
Example #29
    def remove_router_interface(self, context, router_id, interface_info):
        """Remove a subnet of a network from an existing router."""

        new_router = (
            super(AristaL3ServicePlugin, self).remove_router_interface(
                context, router_id, interface_info))

        # Get network information of the subnet that is being removed
        subnet = self.get_subnet(context, new_router['subnet_id'])
        network_id = subnet['network_id']

        # For SVI removal from Arista HW, segmentation ID is needed
        ml2_db = NetworkContext(self, context, {'id': network_id})
        seg_id = ml2_db.network_segments[0]['segmentation_id']

        router = super(AristaL3ServicePlugin, self).get_router(context,
                                                               router_id)
        router_info = copy.deepcopy(new_router)
        router_info['seg_id'] = seg_id
        router_info['name'] = router['name']

        try:
            self.driver.remove_router_interface(context, router_info)
            return new_router
        except Exception as exc:
            LOG.error(_LE("Error removing interface %(interface)s from "
                          "router %(router_id)s on Arista HW"
                          "Exception =(exc)s"),
                      {'interface': interface_info, 'router_id': router_id,
                       'exc': exc})
Example #30
 def daemon_loop(self):
     LOG.info(_("eSwitch Agent Started!"))
     sync = True
     port_info = {'current': set(),
                  'added': set(),
                  'removed': set(),
                  'updated': set()}
     while True:
         start = time.time()
         try:
             port_info = self.scan_ports(previous=port_info, sync=sync)
         except exceptions.RequestTimeout:
             LOG.exception(_("Request timeout in agent event loop "
                             "eSwitchD is not responding - exiting..."))
             raise SystemExit(1)
         if sync:
             LOG.info(_LI("Agent out of sync with plugin!"))
             sync = False
         if self._port_info_has_changes(port_info):
             LOG.debug("Starting to process devices in:%s", port_info)
             try:
                 sync = self.process_network_ports(port_info)
             except Exception:
                 LOG.exception(_LE("Error in agent event loop"))
                 sync = True
         # sleep till end of polling interval
         elapsed = (time.time() - start)
         if (elapsed < self._polling_interval):
             time.sleep(self._polling_interval - elapsed)
         else:
             LOG.debug(_("Loop iteration exceeded interval "
                         "(%(polling_interval)s vs. %(elapsed)s)"),
                       {'polling_interval': self._polling_interval,
                        'elapsed': elapsed})
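The tail of daemon_loop is fixed-rate polling: sleep for whatever is left of the interval, and log when an iteration overran it. The same scheduling logic in isolation:

import time

def run_at_interval(step, polling_interval, iterations):
    """Invoke step() roughly every polling_interval seconds."""
    for _ in range(iterations):
        start = time.time()
        step()
        elapsed = time.time() - start
        if elapsed < polling_interval:
            time.sleep(polling_interval - elapsed)  # pad out the interval
        else:
            print('Loop iteration exceeded interval (%.2fs vs %.2fs)'
                  % (polling_interval, elapsed))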
Example #31
    def _remove_workflow(self, ids, context, post_remove_function):

        wf_name = ids['pool']
        LOG.debug('Remove the workflow %s', wf_name)
        resource = '/api/workflow/%s' % (wf_name)
        rest_return = self.rest_client.call('DELETE', resource, None, None)
        response = _rest_wrapper(rest_return, [204, 202, 404])
        if rest_return[RESP_STATUS] == 404:
            if post_remove_function:
                try:
                    post_remove_function(True)
                    LOG.debug('Post-remove workflow function %r completed',
                              post_remove_function)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(
                            _LE('Post-remove workflow function '
                                '%r failed'), post_remove_function)
            self.plugin._delete_db_vip(context, ids['vip'])
        else:
            oper = OperationAttributes(response['uri'],
                                       ids,
                                       lb_db.Vip,
                                       ids['vip'],
                                       delete=True,
                                       post_op_function=post_remove_function)
            LOG.debug('Pushing operation %s to the queue', oper)

            self._start_completion_handling_thread()
            self.queue.put_nowait(oper)
Example #32
    def delete_network_postcommit(self, mech_context):
        """Delete network which translates to removng portprofile
        from the switch.
        """

        LOG.debug("delete_network_postcommit: called")
        network = mech_context.current
        network_id = network['id']
        vlan_id = network['provider:segmentation_id']
        tenant_id = network['tenant_id']

        try:
            self._driver.delete_network(self._switch['address'],
                                        self._switch['username'],
                                        self._switch['password'],
                                        vlan_id)
        except Exception:
            LOG.exception(_LE("Brocade NOS driver: failed to delete network"))
            raise Exception(
                _("Brocade switch exception, "
                  "delete_network_postcommit failed"))

        LOG.info(_LI("delete network (postcommit): %(network_id)s"
                     " with vlan = %(vlan_id)s"
                     " for tenant %(tenant_id)s"),
                {'network_id': network_id,
                 'vlan_id': vlan_id,
                 'tenant_id': tenant_id})
Example #33
    def delete_network_postcommit(self, mech_context):
        """Delete network which translates to removng portprofile
        from the switch.
        """

        LOG.debug("delete_network_postcommit: called")
        network = mech_context.current
        network_id = network['id']
        vlan_id = network['provider:segmentation_id']
        tenant_id = network['tenant_id']

        try:
            self._driver.delete_network(self._switch['address'],
                                        self._switch['username'],
                                        self._switch['password'], vlan_id)
        except Exception:
            LOG.exception(_LE("Brocade NOS driver: failed to delete network"))
            raise Exception(
                _("Brocade switch exception, "
                  "delete_network_postcommit failed"))

        LOG.info(
            _LI("delete network (postcommit): %(network_id)s"
                " with vlan = %(vlan_id)s"
                " for tenant %(tenant_id)s"), {
                    'network_id': network_id,
                    'vlan_id': vlan_id,
                    'tenant_id': tenant_id
                })
Example #34
 def _get_enabled_agents(self, context, network, agents, method, payload):
     """Get the list of agents whose admin_state is UP."""
     network_id = network['id']
     enabled_agents = [x for x in agents if x.admin_state_up]
     active_agents = [x for x in agents if x.is_active]
     len_enabled_agents = len(enabled_agents)
     len_active_agents = len(active_agents)
     if len_active_agents < len_enabled_agents:
         LOG.warn(_LW("Only %(active)d of %(total)d DHCP agents associated "
                      "with network '%(net_id)s' are marked as active, so "
                      "notifications may be sent to inactive agents."),
                  {'active': len_active_agents,
                   'total': len_enabled_agents,
                   'net_id': network_id})
     if not enabled_agents:
         num_ports = self.plugin.get_ports_count(
             context, {'network_id': [network_id]})
         notification_required = (
             num_ports > 0 and len(network['subnets']) >= 1)
         if notification_required:
             LOG.error(_LE("Will not send event %(method)s for network "
                           "%(net_id)s: no agent available. Payload: "
                           "%(payload)s"),
                       {'method': method,
                        'net_id': network_id,
                        'payload': payload})
     return enabled_agents
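The warning above fires on the gap between enabled and active agents. A minimal standalone illustration of that two-stage filter, using namedtuple stand-ins for the Neutron agent objects (hypothetical data):

    from collections import namedtuple

    Agent = namedtuple('Agent', ['host', 'admin_state_up', 'is_active'])

    agents = [Agent('host-a', True, True),
              Agent('host-b', True, False),
              Agent('host-c', False, False)]
    enabled_agents = [a for a in agents if a.admin_state_up]  # host-a, host-b
    active_agents = [a for a in agents if a.is_active]        # host-a only
    # 1 < 2 here, so the method above would log the "inactive agents" warning
    print('%d of %d active' % (len(active_agents), len(enabled_agents)))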
Exemple #35
0
    def sync_allocations(self):

        # determine the currently configured allocatable GRE IDs
        gre_ids = set()
        for gre_id_range in self.tunnel_ranges:
            tun_min, tun_max = gre_id_range
            if tun_max + 1 - tun_min > 1000000:
                LOG.error(
                    _LE("Skipping unreasonable gre ID range "
                        "%(tun_min)s:%(tun_max)s"), {
                            'tun_min': tun_min,
                            'tun_max': tun_max
                        })
            else:
                gre_ids |= set(moves.xrange(tun_min, tun_max + 1))

        session = db_api.get_session()
        with session.begin(subtransactions=True):
            # remove from table unallocated tunnels not currently allocatable
            allocs = (session.query(GreAllocation).all())
            for alloc in allocs:
                try:
                    # see if tunnel is allocatable
                    gre_ids.remove(alloc.gre_id)
                except KeyError:
                    # it's not allocatable, so check if it's allocated
                    if not alloc.allocated:
                        # it's not, so remove it from table
                        LOG.debug("Removing tunnel %s from pool", alloc.gre_id)
                        session.delete(alloc)

            # add missing allocatable tunnels to table
            for gre_id in sorted(gre_ids):
                alloc = GreAllocation(gre_id=gre_id)
                session.add(alloc)
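The session bookkeeping above reduces to a set reconciliation. A sketch of the same logic with the SQLAlchemy session replaced by a plain dict of {gre_id: allocated} (a hypothetical stand-in):

    def reconcile(tunnel_ranges, existing):
        gre_ids = set()
        for tun_min, tun_max in tunnel_ranges:
            if tun_max + 1 - tun_min > 1000000:
                continue  # skip unreasonable ranges, as the method does
            gre_ids |= set(range(tun_min, tun_max + 1))
        # unallocated IDs that fell out of the configured ranges get dropped
        to_delete = [gid for gid, allocated in existing.items()
                     if gid not in gre_ids and not allocated]
        # configured IDs missing from the table get added
        to_add = sorted(gre_ids - set(existing))
        return to_add, to_delete

    print(reconcile([(1, 5)], {4: False, 9: False, 10: True}))
    # ([1, 2, 3, 5], [9])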
Exemple #36
0
 def _create_dvr_mac_address(self, context, host):
     """Create DVR mac address for a given host."""
     base_mac = cfg.CONF.dvr_base_mac.split(':')
     max_retries = cfg.CONF.mac_generation_retries
     for attempt in reversed(range(max_retries)):
         try:
             with context.session.begin(subtransactions=True):
                 mac_address = utils.get_random_mac(base_mac)
                 dvr_mac_binding = DistributedVirtualRouterMacAddress(
                     host=host, mac_address=mac_address)
                 context.session.add(dvr_mac_binding)
                 LOG.debug(
                     "Generated DVR mac for host %(host)s "
                     "is %(mac_address)s", {
                         'host': host,
                         'mac_address': mac_address
                     })
             dvr_macs = self.get_dvr_mac_address_list(context)
             # TODO(vivek): improve scalability of this fanout by
             # sending a single mac address rather than the entire set
             self.notifier.dvr_mac_address_update(context, dvr_macs)
             return self._make_dvr_mac_address_dict(dvr_mac_binding)
         except db_exc.DBDuplicateEntry:
             LOG.debug(
                 "Generated DVR mac %(mac)s exists."
                 " Remaining attempts %(attempts_left)s.", {
                     'mac': mac_address,
                     'attempts_left': attempt
                 })
     LOG.error(_LE("MAC generation error after %s attempts"), max_retries)
     raise ext_dvr.MacAddressGenerationFailure(host=host)
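The retry loop above leans on the DB unique constraint to detect collisions. A hedged sketch of the same pattern with an in-memory set standing in for the table ('fa:16:3e' is the OpenStack OUI):

    import random

    def random_mac(base=('fa', '16', '3e')):
        # base OUI plus three random octets, in the spirit of
        # utils.get_random_mac
        return ':'.join(list(base) +
                        ['%02x' % random.randint(0, 255) for _ in range(3)])

    def allocate_mac(taken, max_retries=6):
        for attempts_left in reversed(range(max_retries)):
            mac = random_mac()
            if mac not in taken:  # stands in for no DBDuplicateEntry raised
                taken.add(mac)
                return mac
            # collision: fall through and retry, as the except clause does
        raise RuntimeError('MAC generation error after %s attempts'
                           % max_retries)

    print(allocate_mac(set()))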
Exemple #37
0
    def call_driver(self, action, network, **action_kwargs):
        """Invoke an action on a DHCP driver instance."""
        LOG.debug('Calling driver for network: %(net)s action: %(action)s',
                  {'net': network.id, 'action': action})
        try:
            # the Driver expects something that is duck typed similar to
            # the base models.
            driver = self.dhcp_driver_cls(self.conf,
                                          network,
                                          self.root_helper,
                                          self.dhcp_version,
                                          self.plugin_rpc)

            getattr(driver, action)(**action_kwargs)
            return True
        except exceptions.Conflict:
            # No need to resync here, the agent will receive the event related
            # to a status update for the network
            LOG.warning(_LW('Unable to %(action)s dhcp for %(net_id)s: there '
                            'is a conflict with its current state; please '
                            'check that the network and/or its subnet(s) '
                            'still exist.'),
                        {'net_id': network.id, 'action': action})
        except Exception as e:
            self.schedule_resync(e, network.id)
            if ((isinstance(e, messaging.RemoteError)
                 and e.exc_type == 'NetworkNotFound')
                    or isinstance(e, exceptions.NetworkNotFound)):
                LOG.warning(_LW("Network %s has been deleted."), network.id)
            else:
                LOG.exception(_LE('Unable to %(action)s dhcp for '
                                  '%(net_id)s.'),
                              {'net_id': network.id, 'action': action})
Exemple #38
0
def _set_rules(data):
    default_rule = 'default'
    LOG.debug(_("Loading policies from file: %s"), _POLICY_PATH)
    # Ensure backward compatibility with folsom/grizzly convention
    # for extension rules
    policies = policy.Rules.load_json(data, default_rule)
    for pol in policies.keys():
        if any([pol.startswith(depr_pol) for depr_pol in
                DEPRECATED_POLICY_MAP.keys()]):
            LOG.warn(_LW("Found deprecated policy rule:%s. Please consider "
                         "upgrading your policy configuration file"), pol)
            pol_name, action = pol.rsplit(':', 1)
            try:
                new_actions = DEPRECATED_ACTION_MAP[action]
                new_policies = DEPRECATED_POLICY_MAP[pol_name]
                # bind new actions and policies together
                for actual_policy in ['_'.join(item) for item in
                                      itertools.product(new_actions,
                                                        new_policies)]:
                    if actual_policy not in policies:
                        # New policy, same rule
                        LOG.info(_LI("Inserting policy:%(new_policy)s in "
                                     "place of deprecated "
                                     "policy:%(old_policy)s"),
                                 {'new_policy': actual_policy,
                                  'old_policy': pol})
                        policies[actual_policy] = policies[pol]
                # Remove old-style policy
                del policies[pol]
            except KeyError:
                LOG.error(_LE("Backward compatibility unavailable for "
                              "deprecated policy %s. The policy will "
                              "not be enforced"), pol)
    policy.set_rules(policies)
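The expansion step is the subtle part: each deprecated rule fans out to the cartesian product of its new actions and new policies. A self-contained illustration with made-up stand-ins for DEPRECATED_ACTION_MAP and DEPRECATED_POLICY_MAP entries:

    import itertools

    new_actions = ['create', 'update']       # hypothetical action map entry
    new_policies = ['router', 'floatingip']  # hypothetical policy map entry
    expanded = ['_'.join(item)
                for item in itertools.product(new_actions, new_policies)]
    print(expanded)
    # ['create_router', 'create_floatingip', 'update_router',
    #  'update_floatingip']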
Exemple #39
0
    def _process_data(self, ctxt, version, method, namespace, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
        (see rpc.dispatcher.RpcDispatcher), pass it the version,
        method, and args and let it dispatch as appropriate.  If not, use
        the old behavior of magically calling the specified method on the
        proxy we have here.
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, namespace,
                                       **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                for x in rval:
                    ctxt.reply(x, None, connection_pool=self.connection_pool)
            else:
                ctxt.reply(rval, None, connection_pool=self.connection_pool)
            # This final None tells multicall that it is done.
            ctxt.reply(ending=True, connection_pool=self.connection_pool)
        except rpc_common.ClientException as e:
            LOG.debug('Expected exception during message handling (%s)',
                      e._exc_info[1])
            ctxt.reply(None, e._exc_info,
                       connection_pool=self.connection_pool,
                       log_failure=False)
        except Exception:
            # sys.exc_info() is deleted by LOG.exception().
            exc_info = sys.exc_info()
            LOG.error(_LE('Exception during message handling'),
                      exc_info=exc_info)
            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
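The generator branch is what makes multicall work: each yielded value becomes its own reply, and a final ending marker closes the call. A sketch with reply() reduced to a hypothetical callable:

    import inspect

    def deliver(rval, reply):
        if inspect.isgenerator(rval):
            for item in rval:      # stream each yielded value as one reply
                reply(item)
        else:
            reply(rval)
        reply(ending=True)         # final marker: tells multicall it is done

    def fake_reply(value=None, ending=False):
        print('reply: %s ending: %s' % (value, ending))

    deliver((x * x for x in range(3)), fake_reply)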
Exemple #40
0
    def _wrap(self, *args, **kwargs):
        try:
            assert issubclass(
                self.__class__, sqlalchemy.orm.session.Session
            ), ('_wrap_db_error() can only be applied to methods of '
                'subclasses of sqlalchemy.orm.session.Session.')

            return f(self, *args, **kwargs)
        except UnicodeEncodeError:
            raise exception.DBInvalidUnicodeParameter()
        except sqla_exc.OperationalError as e:
            _raise_if_db_connection_lost(e, self.bind)
            _raise_if_deadlock_error(e, self.bind.dialect.name)
            # NOTE(comstud): A lot of code is checking for OperationalError
            # so let's not wrap it for now.
            raise
        # note(boris-42): We should catch unique constraint violation and
        # wrap it by our own DBDuplicateEntry exception. Unique constraint
        # violation is wrapped by IntegrityError.
        except sqla_exc.IntegrityError as e:
            # note(boris-42): SqlAlchemy doesn't unify errors from different
            # DBs so we must do this. Also in some tables (for example
            # instance_types) there are more than one unique constraint. This
            # means we should get names of columns, which values violate
            # unique constraint, from error message.
            _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
            raise exception.DBError(e)
        except Exception as e:
            LOG.exception(_LE('DB exception wrapped.'))
            raise exception.DBError(e)
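The method above is produced by a decorator factory around Session methods; the essence is ordered except clauses that translate driver-specific errors into one exception hierarchy. A minimal sketch, with DBError as a stand-in for the oslo exception:

    import functools

    class DBError(Exception):
        """Stand-in for the oslo.db DBError hierarchy."""

    def wrap_db_error(f):
        @functools.wraps(f)
        def _wrap(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except DBError:
                raise                    # already translated, pass through
            except Exception as e:
                raise DBError(e)         # wrap anything unexpected
        return _wrap

    @wrap_db_error
    def flaky():
        raise ValueError('boom')         # surfaces as DBError when called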
Exemple #41
0
    def _wait_for_exit_or_signal(self, ready_callback=None):
        status = None
        signo = 0

        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            if ready_callback:
                ready_callback()
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            self.stop()
            if rpc:
                try:
                    rpc.cleanup()
                except Exception:
                    # We're shutting down, so it doesn't matter at this point.
                    LOG.exception(_LE('Exception during rpc cleanup.'))

        return status, signo
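_signo_to_signame is not shown in the snippet; a plausible reconstruction (hypothetical, the real helper lives alongside the launcher) is just a lookup table:

    import signal

    def _signo_to_signame(signo):
        # map the signals the launcher handles back to readable names
        return {signal.SIGTERM: 'SIGTERM',
                signal.SIGINT: 'SIGINT'}.get(signo, 'UNKNOWN')

    print(_signo_to_signame(signal.SIGINT))   # SIGINT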
Exemple #42
0
    def nova_services_up(self):
        """Checks if required Nova services are up and running.

        returns: True if all needed Nova services are up, False otherwise
        """
        required = set([
            'nova-conductor', 'nova-cert', 'nova-scheduler', 'nova-compute',
            'nova-consoleauth'
        ])
        try:
            services = self._nclient.services.list()
        # There are several individual Nova client exceptions but they have
        # no other common base than Exception, hence the long list.
        except (nova_exc.UnsupportedVersion, nova_exc.CommandError,
                nova_exc.AuthorizationFailure, nova_exc.NoUniqueMatch,
                nova_exc.AuthSystemNotFound, nova_exc.NoTokenLookupException,
                nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
                nova_exc.ConnectionRefused, nova_exc.ClientException,
                Exception) as e:
            LOG.error(_LE('Failure determining running Nova services: %s'), e)
            return False
        return not bool(
            required.difference([
                service.binary for service in services
                if service.status == 'enabled' and service.state == 'up'
            ]))
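The return expression compresses the whole check into set arithmetic: every required binary must appear among the enabled, up services. The same logic on hard-coded sample data:

    required = {'nova-conductor', 'nova-scheduler', 'nova-compute'}
    services = [('nova-compute', 'enabled', 'up'),
                ('nova-scheduler', 'enabled', 'up'),
                ('nova-conductor', 'disabled', 'up')]
    running = {binary for binary, status, state in services
               if status == 'enabled' and state == 'up'}
    print(not bool(required - running))  # False: nova-conductor is disabled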
Exemple #43
0
 def add_tunnel_port(self, port_name, remote_ip, local_ip,
                     tunnel_type=constants.TYPE_GRE,
                     vxlan_udp_port=constants.VXLAN_UDP_PORT,
                     dont_fragment=True):
     vsctl_command = ["--", "--may-exist", "add-port", self.br_name,
                      port_name]
     vsctl_command.extend(["--", "set", "Interface", port_name,
                           "type=%s" % tunnel_type])
     if tunnel_type == constants.TYPE_VXLAN:
         # Only set the VXLAN UDP port if it's not the default
         if vxlan_udp_port != constants.VXLAN_UDP_PORT:
             vsctl_command.append("options:dst_port=%s" % vxlan_udp_port)
     vsctl_command.append(("options:df_default=%s" %
                          bool(dont_fragment)).lower())
     vsctl_command.extend(["options:remote_ip=%s" % remote_ip,
                           "options:local_ip=%s" % local_ip,
                           "options:in_key=flow",
                           "options:out_key=flow"])
     self.run_vsctl(vsctl_command)
     ofport = self.get_port_ofport(port_name)
     if (tunnel_type == constants.TYPE_VXLAN and
             ofport == INVALID_OFPORT):
         LOG.error(_LE('Unable to create VXLAN tunnel port. Please ensure '
                       'that an openvswitch version that supports VXLAN is '
                       'installed.'))
     return ofport
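For reference, the assembled command for a GRE tunnel port comes out as a single ovs-vsctl transaction; the names and IPs below are illustrative only:

    br_name, port_name = 'br-tun', 'gre-0a000001'
    remote_ip, local_ip = '10.0.0.1', '10.0.0.2'
    cmd = ['--', '--may-exist', 'add-port', br_name, port_name,
           '--', 'set', 'Interface', port_name, 'type=gre',
           'options:df_default=true',
           'options:remote_ip=%s' % remote_ip,
           'options:local_ip=%s' % local_ip,
           'options:in_key=flow', 'options:out_key=flow']
    print(' '.join(['ovs-vsctl'] + cmd))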
Exemple #44
0
    def _run_openstack_l3_cmds(self, commands, server):
        """Execute/sends a CAPI (Command API) command to EOS.

        In this method, the list of commands is wrapped with prefix and
        postfix commands to make it understandable to EOS.

        :param commands : List of command to be executed on EOS.
        :param server: Server endpoint on the Arista switch to be configured
        """
        command_start = ['enable', 'configure']
        command_end = ['exit']
        full_command = command_start + commands + command_end

        LOG.info(_LI('Executing command on Arista EOS: %s'), full_command)

        try:
            # this returns array of return values for every command in
            # full_command list
            ret = server.runCmds(version=1, cmds=full_command)
            LOG.info(_LI('Results of execution on Arista EOS: %s'), ret)

        except Exception:
            msg = (_LE("Error occured while trying to execute "
                     "commands %(cmd)s on EOS %(host)s"),
                   {'cmd': full_command, 'host': server})
            LOG.exception(msg)
            raise arista_exc.AristaServicePluginRpcError(msg=msg)
Exemple #45
0
    def _call_on_drivers(self,
                         method_name,
                         context,
                         continue_on_failure=False):
        """Helper method for calling a method across all mechanism drivers.

        :param method_name: name of the method to call
        :param context: context parameter to pass to each method call
        :param continue_on_failure: whether or not to continue to call
        all mechanism drivers once one has raised an exception
        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver call fails.
        """
        error = False
        for driver in self.ordered_mech_drivers:
            try:
                getattr(driver.obj, method_name)(context)
            except Exception:
                LOG.exception(
                    _LE("Mechanism driver '%(name)s' failed in %(method)s"), {
                        'name': driver.name,
                        'method': method_name
                    })
                error = True
                if not continue_on_failure:
                    break
        if error:
            raise ml2_exc.MechanismDriverError(method=method_name)
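The continue_on_failure flag only changes when the loop stops; an error anywhere still raises once the loop ends. A stripped-down sketch with plain callables standing in for the driver registry:

    def call_on_drivers(drivers, continue_on_failure=False):
        error = False
        for name, fn in drivers:
            try:
                fn()
            except Exception:
                error = True
                if not continue_on_failure:
                    break        # first failure aborts the chain
        if error:
            raise RuntimeError('mechanism driver call failed')

    def ok():
        pass

    def boom():
        raise ValueError()

    try:
        # with continue_on_failure=True both drivers run before the raise
        call_on_drivers([('a', boom), ('b', ok)], continue_on_failure=True)
    except RuntimeError as exc:
        print(exc)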
Exemple #46
0
 def enough_candidates_for_ha(self, candidates):
     if not candidates or len(candidates) < self.min_ha_agents:
         LOG.error(
             _LE("Not enough candidates, a HA router needs at least "
                 "%s agents"), self.min_ha_agents)
         return False
     return True
Exemple #47
0
def check_nova_notify():
    result = checks.nova_notify_supported()
    if not result:
        LOG.error(_LE('Nova notifications are enabled, but novaclient is not '
                      'installed. Either disable nova notifications or '
                      'install python-novaclient.'))
    return result
Exemple #48
0
    def remove_router_interface(self, context, router_id, interface_info):
        """Remove a subnet of a network from an existing router."""

        new_router = (
            super(AristaL3ServicePlugin, self).remove_router_interface(
                context, router_id, interface_info))

        # Get network information of the subnet that is being removed
        subnet = self.get_subnet(context, new_router['subnet_id'])
        network_id = subnet['network_id']

        # For SVI removal from Arista HW, segmentation ID is needed
        ml2_db = NetworkContext(self, context, {'id': network_id})
        seg_id = ml2_db.network_segments[0]['segmentation_id']

        router = super(AristaL3ServicePlugin, self).get_router(context,
                                                               router_id)
        router_info = copy.deepcopy(new_router)
        router_info['seg_id'] = seg_id
        router_info['name'] = router['name']

        try:
            self.driver.remove_router_interface(context, router_info)
            return new_router
        except Exception as exc:
            LOG.error(_LE("Error removing interface %(interface)s from "
                          "router %(router_id)s on Arista HW"
                          "Exception =(exc)s"),
                      {'interface': interface_info, 'router_id': router_id,
                       'exc': exc})
    def delete_network_precommit(self, mech_context):
        """Delete Network from the plugin specific database table."""

        LOG.debug("delete_network_precommit: called")

        network = mech_context.current
        network_id = network['id']
        vlan_id = network['provider:segmentation_id']
        tenant_id = network['tenant_id']

        context = mech_context._plugin_context

        try:
            brocade_db.delete_network(context, network_id)
        except Exception:
            LOG.exception(
                _LE("Brocade Mechanism: failed to delete network in db"))
            raise Exception(
                _("Brocade Mechanism: delete_network_precommit failed"))

        LOG.info(
            _LI("delete network (precommit): %(network_id)s"
                " with vlan = %(vlan_id)s"
                " for tenant %(tenant_id)s"), {
                    'network_id': network_id,
                    'vlan_id': vlan_id,
                    'tenant_id': tenant_id
                })
    def create_port_precommit(self, mech_context):
        """Create logical port on the switch (db update)."""

        LOG.debug("create_port_precommit: called")

        port = mech_context.current
        port_id = port['id']
        network_id = port['network_id']
        tenant_id = port['tenant_id']
        admin_state_up = port['admin_state_up']

        context = mech_context._plugin_context

        network = brocade_db.get_network(context, network_id)
        vlan_id = network['vlan']

        try:
            brocade_db.create_port(context, port_id, network_id, None, vlan_id,
                                   tenant_id, admin_state_up)
        except Exception:
            LOG.exception(
                _LE("Brocade Mechanism: failed to create port"
                    " in db"))
            raise Exception(
                _("Brocade Mechanism: create_port_precommit failed"))
 def _add_type_and_hosting_device_info(self, context, router,
                                       binding_info=None, schedule=True):
     """Adds type and hosting device information to a router."""
     try:
         if binding_info is None:
             binding_info = self._get_router_binding_info(context,
                                                          router['id'])
     except RouterBindingInfoError:
         LOG.error(_LE('DB inconsistency: No hosting info associated with '
                       'router %s'), router['id'])
         router['hosting_device'] = None
         return
     router['router_type'] = {
         'id': None,
         'name': 'CSR1kv_router',
         'cfg_agent_driver': (cfg.CONF.hosting_devices
                              .csr1kv_cfgagent_router_driver)}
     if binding_info.hosting_device is None and schedule:
         # This router has not been scheduled to a hosting device
         # so we try to do it now.
         self.schedule_router_on_hosting_device(context, binding_info)
         context.session.expire(binding_info)
     if binding_info.hosting_device is None:
         router['hosting_device'] = None
     else:
         router['hosting_device'] = self.get_device_info_for_agent(
             binding_info.hosting_device)
Exemple #54
0
    def apply_default_policy(self, agent_mode, apply_list, firewall):
        LOG.debug('Applying firewall %(fw_id)s for tenant %(tid)s', {
            'fw_id': firewall['id'],
            'tid': firewall['tenant_id']
        })
        fwid = firewall['id']
        try:
            for router_info in apply_list:
                ipt_if_prefix_list = self._get_ipt_mgrs_with_if_prefix(
                    agent_mode, router_info)
                for ipt_if_prefix in ipt_if_prefix_list:
                    # the following only updates local memory; no hole in FW
                    ipt_mgr = ipt_if_prefix['ipt']
                    self._remove_chains(fwid, ipt_mgr)
                    self._remove_default_chains(ipt_mgr)

                    # create default 'DROP ALL' policy chain
                    self._add_default_policy_chain_v4v6(ipt_mgr)
                    self._enable_policy_chain(fwid, ipt_if_prefix)

                    # apply the changes immediately (no defer in firewall path)
                    ipt_mgr.defer_apply_off()
        except (LookupError, RuntimeError):
            # catch known library exceptions and raise Fwaas generic exception
            LOG.exception(
                _LE("Failed to apply default policy on firewall: %s"), fwid)
            raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME)
    def setup_physical_bridges(self, bridge_mappings):
        """Setup the physical network bridges.

        Creates physical network bridges and links them to the
        integration bridge using veths.

        :param bridge_mappings: map physical network names to bridge names.
        """
        self.phys_brs = {}
        self.phys_ofports = {}
        ip_wrapper = ip_lib.IPWrapper(self.root_helper)
        for physical_network, bridge in bridge_mappings.iteritems():
            LOG.info(_LI("Mapping physical network %(physical_network)s to "
                         "bridge %(bridge)s"),
                     {'physical_network': physical_network,
                      'bridge': bridge})
            # setup physical bridge
            if not ip_lib.device_exists(bridge, self.root_helper):
                LOG.error(_LE("Bridge %(bridge)s for physical network "
                              "%(physical_network)s does not exist. Agent "
                              "terminated!"),
                          {'physical_network': physical_network,
                           'bridge': bridge})
                raise SystemExit(1)
            br = Bridge(bridge, self.root_helper, self.ryuapp)
            self.phys_brs[physical_network] = br

            self._phys_br_patch_physical_bridge_with_integration_bridge(
                br, physical_network, bridge, ip_wrapper)
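The bridge_mappings handed to this method are typically parsed from 'physnet:bridge' pairs in the agent configuration; a hedged sketch of that parsing (the real agent uses a shared neutron helper):

    def parse_mappings(raw):
        mappings = {}
        for entry in raw:
            physnet, sep, bridge = entry.partition(':')
            if not sep or not physnet or not bridge:
                raise ValueError('Invalid mapping: %r' % entry)
            mappings[physnet.strip()] = bridge.strip()
        return mappings

    print(parse_mappings(['physnet1:br-eth1', 'physnet2:br-eth2']))
    # -> {'physnet1': 'br-eth1', 'physnet2': 'br-eth2'}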